'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
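# A minimal standalone sketch of what the tests above exercise (assumes a
# transformers install with torch and vision extras; the sizes mirror the
# tester defaults: 10 frames, 3 channels, 18x18 center crops).
if __name__ == "__main__":
    processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    video = [np.random.randint(0, 256, (30, 30, 3), dtype=np.uint8) for _ in range(10)]
    pixel_values = processor(video, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 10, 3, 18, 18)  # (batch, frames, channels, height, width)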
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir)
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev)
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
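# A hypothetical usage sketch for the tool above; "photo.png" and the label are
# illustrative, and the "CIDAS/clipseg-rd64-refined" checkpoint is downloaded
# on first use:
if __name__ == "__main__":
    from PIL import Image

    tool = ImageSegmentationTool()
    mask = tool(image=Image.open("photo.png"), label="a cat")  # returns a PIL mask image
    mask.save("cat_mask.png")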
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase ( lowerCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(lowerCAmelCase__ )
__a = ''''''.join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data )
__a = len(lowerCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__a = b'''=''' * ((6 - len(lowerCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6)
else:
__a = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
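# A quick round-trip check of the two functions above against the standard
# library (uses base64_encode/base64_decode exactly as defined in this module):
if __name__ == "__main__":
    import base64 as stdlib_base64

    for sample in (b"", b"f", b"fo", b"foo", b"Hello, World!"):
        assert base64_encode(sample) == stdlib_base64.b64encode(sample)
        assert base64_decode(base64_encode(sample)) == sample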
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
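# A quick self-check against the expected order; note that odd_even_sort
# mutates its argument in place and also returns it:
assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]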
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
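# A minimal sketch of running the pipeline these tests cover (assumes diffusers
# is installed; the checkpoint is the one used in the slow CIFAR-10 test above):
if __name__ == "__main__":
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    sample = pipe(num_inference_steps=50).images[0]  # a 32x32 PIL image
    sample.save("ddim_sample.png")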
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal.")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn)
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_embeds=None, device=None, input_ids=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"])

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 72 | """simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator)
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
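# A hypothetical usage sketch for the unconditional latent pipeline above; the
# checkpoint name is illustrative (any VQVAE + UNet + DDIM checkpoint in this
# layout would do):
if __name__ == "__main__":
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # assumed checkpoint
    image = pipe(num_inference_steps=50).images[0]
    image.save("ldm_sample.png")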
def solution(limit: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'{solution() = }')
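# The same sum in O(1) via inclusion-exclusion over arithmetic series: the sum
# of multiples of k below `limit` is k * m * (m + 1) / 2 with m = (limit - 1) // k.
def solution_closed_form(limit: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (limit - 1) // k
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form() == solution() == 233168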
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
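# The _LazyModule above defers the heavy framework imports until an attribute is
# first accessed. A standalone sketch of the same idea using PEP 562's
# module-level __getattr__ (illustrative only; transformers' _LazyModule also
# handles __all__, dir(), and backend checks):
#
#     import importlib
#
#     _LAZY_ATTRS = {"Speech2TextModel": ".modeling_speech_to_text"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")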
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ['MobileViTFeatureExtractor']
    _import_structure["image_processing_mobilevit"] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
_A: List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
_A: Optional[int] = prophet
_A: Tuple = prophet_old
else:
_A: Tuple = prophet.prophetnet
_A: Any = prophet_old.model
_A: int = False
for attribute in attributes:
if attribute in mapping:
_A: Optional[int] = mapping[attribute]
if not hasattr(a , a ) and len(a ) > 0:
_A: int = attribute
elif hasattr(a , a ):
_A: Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_A: Union[str, Any] = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
_A: Any = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_A: str = old_model.bias
logger.info(f"""{attribute} is initialized""" )
_A: Dict = True
break
elif attribute in special_keys and hasattr(a , '''in_proj_weight''' ):
_A: Optional[int] = old_model.in_proj_weight.shape[0] // 3
_A: Tuple = getattr(a , a )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_A: List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_A: List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_A: int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_A: Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_A: List[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_A: int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_A: Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_A: Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_A: List[Any] = True
break
if attribute.isdigit():
_A: Tuple = model[int(a )]
_A: int = old_model[int(a )]
else:
_A: Union[str, Any] = getattr(a , a )
if old_attribute == "":
_A: Union[str, Any] = old_model
else:
if not hasattr(a , a ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
_A: List[Any] = getattr(a , a )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead', FutureWarning, )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], num_inference_steps: int = 250, eta: float = 0.0, jump_length: int = 10, jump_n_sample: int = 10, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
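# A hypothetical usage sketch (checkpoint and file names are illustrative; per
# the mask preprocessing above, regions where the mask is 0 are inpainted and
# regions where it is 1 are kept from the original):
if __name__ == "__main__":
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")  # assumed checkpoint
    original = PIL.Image.open("face.png").convert("RGB").resize((256, 256))
    mask = PIL.Image.open("mask.png").convert("L").resize((256, 256))
    result = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]
    result.save("inpainted.png")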
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("""KEY""")
VAL = TypeVar("""VAL""")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """simple docstring"""

    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """simple docstring"""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f'''{item.key}: {item.val}''' for item in self._buckets if item)
        return f'''HashMap({val_string})'''
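# Hedged usage sketch of HashMap above (assumes Python 3.10+ because of the
# slots=True dataclass): it behaves like a dict backed by open addressing.
_hm = HashMap(initial_block_size=8)
_hm["a"] = 1
_hm["b"] = 2
del _hm["a"]
assert list(_hm) == ["b"] and _hm["b"] == 2 and len(_hm) == 1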
| 371 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
| 139 | 0 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Tuple =DownBlockaD # noqa F405
lowerCamelCase : Dict ="""down"""
def __a ( self ) -> Union[str, Any]:
a : List[Any] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : int =ResnetDownsampleBlockaD # noqa F405
lowerCamelCase : str ="""down"""
def __a ( self ) -> List[str]:
a : Union[str, Any] = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[int] =AttnDownBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""down"""
def __a ( self ) -> List[str]:
a : str = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Dict =CrossAttnDownBlockaD # noqa F405
lowerCamelCase : Any ="""down"""
def __a ( self ) -> Any:
a, a : Optional[int] = super().prepare_init_args_and_inputs_for_common()
a : Optional[Any] = 32
return init_dict, inputs_dict
def __a ( self ) -> List[str]:
a : Optional[Any] = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[int] =SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase : int ="""down"""
@property
def __a ( self ) -> List[str]:
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Any:
a, a : Optional[int] = super().prepare_init_args_and_inputs_for_common()
a : str = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __a ( self ) -> Dict:
a : Union[str, Any] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : int =SkipDownBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""down"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[Any] =AttnSkipDownBlockaD # noqa F405
lowerCamelCase : Tuple ="""down"""
@property
def __a ( self ) -> Union[str, Any]:
return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
a : Optional[Any] = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =DownEncoderBlockaD # noqa F405
lowerCamelCase : Optional[Any] ="""down"""
@property
def __a ( self ) -> int:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Dict = {
"in_channels": 32,
"out_channels": 32,
}
a : Any = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> List[str]:
a : List[str] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[Any] =AttnDownEncoderBlockaD # noqa F405
lowerCamelCase : Optional[Any] ="""down"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> Optional[int]:
a : Union[str, Any] = {
"in_channels": 32,
"out_channels": 32,
}
a : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> str:
a : List[Any] = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Dict =UNetMidBlockaD # noqa F405
lowerCamelCase : Tuple ="""mid"""
def __a ( self ) -> str:
a : Optional[Any] = {
"in_channels": 32,
"temb_channels": 128,
}
a : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> int:
a : Optional[Any] = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[int] =UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase : Optional[int] ="""mid"""
def __a ( self ) -> Union[str, Any]:
a, a : Dict = super().prepare_init_args_and_inputs_for_common()
a : Tuple = 32
return init_dict, inputs_dict
def __a ( self ) -> Optional[Any]:
a : str = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[int] =UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase : str ="""mid"""
@property
def __a ( self ) -> int:
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a, a : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
a : Tuple = 32
return init_dict, inputs_dict
def __a ( self ) -> Union[str, Any]:
a : str = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Tuple =UpBlockaD # noqa F405
lowerCamelCase : int ="""up"""
@property
def __a ( self ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> str:
a : Optional[int] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Dict =ResnetUpsampleBlockaD # noqa F405
lowerCamelCase : Any ="""up"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : int = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : int =CrossAttnUpBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""up"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a, a : Any = super().prepare_init_args_and_inputs_for_common()
a : int = 32
return init_dict, inputs_dict
def __a ( self ) -> int:
a : List[str] = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Any =SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase : Any ="""up"""
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ , include_encoder_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a, a : str = super().prepare_init_args_and_inputs_for_common()
a : List[str] = 32
return init_dict, inputs_dict
def __a ( self ) -> Optional[Any]:
a : Dict = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Any =AttnUpBlockaD # noqa F405
lowerCamelCase : int ="""up"""
@property
def __a ( self ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __a ( self ) -> Optional[Any]:
a : Any = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =SkipUpBlockaD # noqa F405
lowerCamelCase : int ="""up"""
@property
def __a ( self ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
a : Optional[Any] = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[Any] =AttnSkipUpBlockaD # noqa F405
lowerCamelCase : Dict ="""up"""
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Optional[int] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Any =UpDecoderBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""up"""
@property
def __a ( self ) -> List[str]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Optional[int] = {"in_channels": 32, "out_channels": 32}
a : Any = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Union[str, Any]:
a : Union[str, Any] = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =AttnUpDecoderBlockaD # noqa F405
lowerCamelCase : List[Any] ="""up"""
@property
def __a ( self ) -> Optional[int]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> Tuple:
a : List[Any] = {"in_channels": 32, "out_channels": 32}
a : int = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Optional[Any]:
a : Any = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(lowerCAmelCase__ )
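# Hedged illustration (an assumption about diffusers' UNetBlockTesterMixin, not
# confirmed by this file): each nine-value list above is compared against a
# flattened trailing 3x3 corner of the block output, roughly as sketched here.
import numpy as np
_out = np.arange(2 * 4 * 8 * 8).reshape(2, 4, 8, 8)
_corner = _out[0, -1, -3:, -3:].flatten()
assert _corner.shape == (9,)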
| 105 | '''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False, ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task, ), )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""" )
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""" )
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s" , len(examples) )
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s" , cached_features_file)
                    torch.save(self.features , cached_features_file)
def __len__(self : List[Any] ):
return len(self.features )
        def __getitem__(self : Any , i : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
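# Hedged, standalone illustration of the FileLock caching pattern used above;
# _demo_cached and its pickle format are illustrative, not part of the original.
import pickle
from filelock import FileLock as _FileLock

def _demo_cached(path, compute):
    with _FileLock(path + ".lock"):
        if os.path.exists(path):
            with open(path, "rb") as fh:
                return pickle.load(fh)
        result = compute()
        with open(path, "wb") as fh:
            pickle.dump(result, fh)
        return result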
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False, ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
        def __getitem__(self : int , i : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ) -> Optional[Any]:
    '''simple docstring'''
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
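# Hedged illustration of the RoBERTa/BART "label indices are swapped" hack above:
# entries 1 and 2 of the label list are exchanged before building the label map.
_labels = ["contradiction", "entailment", "neutral"]
_labels[1], _labels[2] = _labels[2], _labels[1]
assert _labels == ["contradiction", "neutral", "entailment"]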
| 1 | 0 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def get_indent(line) -> str:
    '''simple docstring'''
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def a ( __a , __a="" , __a=None , __a=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :int = 0
UpperCamelCase__ :Optional[Any] = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(__a ):
index += 1
UpperCamelCase__ :Optional[Any] = ['''\n'''.join(lines[:index] )]
else:
UpperCamelCase__ :int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase__ :Dict = [lines[index]]
index += 1
while index < len(__a ) and (end_prompt is None or not lines[index].startswith(__a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(__a ) )
if index < len(__a ) - 1:
UpperCamelCase__ :Optional[Any] = [lines[index + 1]]
index += 1
else:
UpperCamelCase__ :int = []
else:
blocks.append('''\n'''.join(__a ) )
UpperCamelCase__ :List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__a ) > 0:
blocks.append('''\n'''.join(__a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__a ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    '''simple docstring'''
    def _inner(x):
        return key(x).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects(objects, key=None):
    '''simple docstring'''
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
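# Hedged worked example of the ordering rule above: constants first, then classes,
# then functions, each bucket sorted case-insensitively with underscores ignored.
assert sort_objects(["load_model", "CONFIG", "Pipeline", "AutoModel", "run"]) == [
    "CONFIG", "AutoModel", "Pipeline", "load_model", "run"
]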
def sort_objects_in_import(import_statement) -> str:
    '''simple docstring'''
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'''[{imports}]'''
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(keys)] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(keys)] )
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace , import_statement)
def sort_imports(file, check_only=True):
    '''simple docstring'''
    with open(file , '''r''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(keys) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'''Overwriting {file}.''' )
            with open(file , '''w''' ) as f:
                f.write('''\n'''.join(main_blocks) )
def sort_imports_in_all_inits(check_only=True):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures) > 0:
        raise ValueError(f'''Would overwrite {len(failures)} files, run `make style`.''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 219 |
'''simple docstring'''
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    '''simple docstring'''
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    '''simple docstring'''
    return any(digit in '''02468''' for digit in str(n) )
def find_circular_primes(limit: int = 1000000) -> list[int]:
    '''simple docstring'''
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num) )]
            if all(is_prime(i) for i in list_nums ):
                result.append(num)
    return result
def solution() -> int:
    '''simple docstring'''
    return len(find_circular_primes() )
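# Hedged worked example: the rotations in list_nums are built by slicing; for 197
# they are 197, 971 and 719, all prime, so 197 is a circular prime.
_s = '''197'''
assert [int(_s[j:] + _s[:j]) for j in range(len(_s))] == [197, 971, 719]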
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""") | 219 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
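# Hedged illustration of build_inputs_with_special_tokens above (BART/MVP layout):
# a single sequence becomes <s> A </s>; a pair becomes <s> A </s> </s> B </s>.
_bos, _eos = 0, 2  # hypothetical token ids, for illustration only
_a, _b = [7, 8], [9]
_single = [_bos] + _a + [_eos]
_pair = _single + [_eos] + _b + [_eos]
assert _pair == [0, 7, 8, 2, 2, 9, 2]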
| 69 |
def reverse_long_words(sentence: str) -> str:
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
def snake_case( __magic_name__ ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
lowercase : Union[str, Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowercase : Optional[Any] = 1
if upper_limit > 0:
lowercase : Dict = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__magic_name__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
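# Hedged worked example of the recurrence: C(3) = C0*C2 + C1*C1 + C2*C0 = 2 + 1 + 2 = 5,
# so the sequence starts 1, 1, 2, 5, 14.
assert catalan_numbers(4) == [1, 1, 2, 5, 14]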
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 364 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    '''simple docstring'''
    if len(profit ) != len(weight ):
        raise ValueError('''The length of profit and weight must be same.''' )
    if max_weight <= 0:
        raise ValueError('''max_weight must greater than zero.''' )
    if any(p < 0 for p in profit ):
        raise ValueError('''Profit can not be negative.''' )
    if any(w < 0 for w in weight ):
        raise ValueError('''Weight can not be negative.''' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 == weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
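# Hedged worked example: ratios are [2.0, 2.0]; item 0 (5 kg, profit 10) is taken
# whole, then 2 of item 1's 4 kg contribute half of its profit 8, i.e. 4.0.
assert calc_profit([10, 8], [5, 4], 7) == 14.0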
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight) | 116 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"
    def __init__( self , depths: List[int] = [3, 2, 6, 4] , hidden_sizes: List[int] = [48, 96, 224, 448] , downsamples: List[bool] = [True, True, True, True] , dim: int = 448 , key_dim: int = 32 , attention_ratio: int = 4 , resolution: int = 7 , num_hidden_layers: int = 5 , num_attention_heads: int = 8 , mlp_expansion_ratio: int = 4 , hidden_dropout_prob: float = 0.0 , patch_size: int = 16 , num_channels: int = 3 , pool_size: int = 3 , downsample_patch_size: int = 3 , downsample_stride: int = 2 , downsample_pad: int = 1 , drop_path_rate: float = 0.0 , num_meta3d_blocks: int = 1 , distillation: bool = True , use_layer_scale: bool = True , layer_scale_init_value: float = 1e-5 , hidden_act: str = "gelu" , initializer_range: float = 0.02 , layer_norm_eps: float = 1e-12 , image_size: int = 224 , batch_norm_eps: float = 1e-05 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 157 | from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__: Optional[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[str]=False ) -> Dict:
__UpperCAmelCase : Dict = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
__UpperCAmelCase : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _snake_case ( _lowercase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: str=13 , __lowerCamelCase: Any=7 , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: str=32 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: Dict=2 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[int]=37 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: int=5_12 , __lowerCamelCase: Optional[int]=16 , __lowerCamelCase: Dict=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: List[Any]=4 , __lowerCamelCase: Union[str, Any]=None , ) -> Optional[int]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : str = use_input_mask
__UpperCAmelCase : Optional[int] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Optional[Any] = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : List[str] = embedding_size
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Tuple = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Dict = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = TFMobileBertModel(config=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = [input_ids, input_mask]
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict ) -> Optional[int]:
__UpperCAmelCase : List[str] = TFMobileBertForMaskedLM(config=__lowerCamelCase )
__UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : Optional[int] = TFMobileBertForNextSentencePrediction(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Tuple = TFMobileBertForSequenceClassification(config=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : Tuple = TFMobileBertForMultipleChoice(config=__lowerCamelCase )
__UpperCAmelCase : Dict = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Optional[int] ) -> Dict:
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Optional[int] = TFMobileBertForTokenClassification(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> Tuple:
__UpperCAmelCase : Tuple = TFMobileBertForQuestionAnswering(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: int ) -> int:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCamelCase )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__UpperCAmelCase : Dict = TFMobileBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
__UpperCAmelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase : str = model(__lowerCamelCase )[0]
__UpperCAmelCase : Any = [1, 6, 3_05_22]
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : str = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
| 157 | 1 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list, iterations: int, ) -> list:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)
    if cols2 != 1:
        msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
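# Minimal usage sketch (hand-verified for three Jacobi sweeps; the system below is
# illustrative and not part of the original test suite):
#
#   coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
#   constant = np.array([[2], [-6], [-4]])
#   jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3)
#   # -> [0.909375, -1.14375, -0.7484375]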
| 355 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
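# Example invocation (script, file, and folder names are illustrative, not fixed by the code):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti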
| 84 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
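# Minimal usage sketch (repo id taken from the public Hugging Face Hub; treat the exact
# loading pattern as an assumption based on the diffusers docs):
#   from diffusers import KandinskyPriorPipeline
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")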
| 62 |
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
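# Example invocation (output path is illustrative; the model name must be a key of MODELS_MAP):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers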
| 139 | 0 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle, else False."""
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur over the neighbours of vertex; report a cycle when a back edge is found."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
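# Quick illustrative checks (graphs chosen here, not drawn from the original tests):
#   check_cycle({0: [1], 1: [2], 2: [0]})  # -> True  (0 -> 1 -> 2 -> 0 is a cycle)
#   check_cycle({0: [1], 1: [2], 2: []})   # -> False (the graph is a simple chain)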
| 204 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
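# Minimal usage sketch (random data; shapes chosen purely for illustration):
#   import torch
#   sample = torch.rand(1, 3, 64, 64) * 2 - 1  # fake model output in [-1, 1]
#   pil_images = pt_to_pil(sample)             # -> list with one 64x64 PIL image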
| 204 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * solution = vector with Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Copy the system into an augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial through the points (1, y_list[0]), (2, y_list[1]), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials for the generating function."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 219 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object.
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
        model_name (`str`, *optional*):
            The name of the model to use.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
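# Typical launch (config file name and flags are illustrative; requires an accelerate
# setup with a DeepSpeed plugin configured):
#   accelerate launch --config_file deepspeed_config.yaml test_checkpointing.py --output_dir ./ckpts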
| 219 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 369 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
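# Example invocation via python-fire (script and directory names are illustrative):
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_mini 128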
| 139 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 77 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=0.0, lowerCamelCase__ = None, lowerCamelCase__ = "geglu", lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = True, lowerCamelCase__ = "layer_norm", lowerCamelCase__ = False, ):
super().__init__()
A : Dict = only_cross_attention
A : Tuple = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
A : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A : Dict = AdaLayerNorm(lowerCamelCase__, lowerCamelCase__ )
elif self.use_ada_layer_norm_zero:
A : List[str] = AdaLayerNormZero(lowerCamelCase__, lowerCamelCase__ )
else:
A : Tuple = nn.LayerNorm(lowerCamelCase__, elementwise_affine=lowerCamelCase__ )
A : Any = Attention(
query_dim=lowerCamelCase__, heads=lowerCamelCase__, dim_head=lowerCamelCase__, dropout=lowerCamelCase__, bias=lowerCamelCase__, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=lowerCamelCase__, )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A : int = (
AdaLayerNorm(lowerCamelCase__, lowerCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(lowerCamelCase__, elementwise_affine=lowerCamelCase__ )
)
A : Dict = Attention(
query_dim=lowerCamelCase__, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=lowerCamelCase__, dim_head=lowerCamelCase__, dropout=lowerCamelCase__, bias=lowerCamelCase__, upcast_attention=lowerCamelCase__, ) # is self-attn if encoder_hidden_states is none
else:
A : Dict = None
A : Dict = None
# 3. Feed-forward
A : Optional[Any] = nn.LayerNorm(lowerCamelCase__, elementwise_affine=lowerCamelCase__ )
A : int = FeedForward(lowerCamelCase__, dropout=lowerCamelCase__, activation_fn=lowerCamelCase__, final_dropout=lowerCamelCase__ )
# let chunk size default to None
A : Optional[Any] = None
A : int = 0
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
# Sets chunk feed-forward
A : List[str] = chunk_size
A : int = dim
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A : Optional[int] = self.norma(lowerCamelCase__, lowerCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A : Tuple = self.norma(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, hidden_dtype=hidden_states.dtype )
else:
A : Tuple = self.norma(lowerCamelCase__ )
A : Dict = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A : str = self.attna(
lowerCamelCase__, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=lowerCamelCase__, **lowerCamelCase__, )
if self.use_ada_layer_norm_zero:
A : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
A : str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A : Optional[int] = (
self.norma(lowerCamelCase__, lowerCamelCase__ ) if self.use_ada_layer_norm else self.norma(lowerCamelCase__ )
)
A : Optional[Any] = self.attna(
lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, attention_mask=lowerCamelCase__, **lowerCamelCase__, )
A : Dict = attn_output + hidden_states
# 3. Feed-forward
A : str = self.norma(lowerCamelCase__ )
if self.use_ada_layer_norm_zero:
A : Tuple = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A : Any = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A : Optional[Any] = torch.cat(
[self.ff(lowerCamelCase__ ) for hid_slice in norm_hidden_states.chunk(lowerCamelCase__, dim=self._chunk_dim )], dim=self._chunk_dim, )
else:
A : Dict = self.ff(lowerCamelCase__ )
if self.use_ada_layer_norm_zero:
A : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
A : Optional[int] = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer with a configurable gated/plain GELU projection."""

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation with optional tanh approximation via `approximate="tanh"`."""

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated GELU variant of the gated linear unit (https://arxiv.org/abs/2002.05202)."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The sigmoid approximation of GELU, x * sigmoid(1.702 * x) (https://arxiv.org/abs/1606.08415)."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
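
# Quick numerical check of the three GELU flavors implemented above: the exact
# erf-based GELU, the tanh approximation, and the sigmoid form x * sigmoid(1.702 * x).
# A minimal sketch for intuition (assumes torch >= 1.12 for the `approximate`
# kwarg of F.gelu); not part of the original module:
def _gelu_variants_demo():
    z = torch.linspace(-3, 3, steps=7)
    exact = F.gelu(z)
    tanh_approx = F.gelu(z, approximate="tanh")
    sigmoid_approx = z * torch.sigmoid(1.702 * z)
    # both approximations stay within a few 1e-2 of the exact curve on this range
    assert (exact - tanh_approx).abs().max() < 1e-2
    assert (exact - sigmoid_approx).abs().max() < 3e-2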
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero) conditioned on timestep and class labels."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
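
# All of the adaptive norms above share one recipe: a parameter-free
# normalization followed by a conditioning-derived affine modulation,
# x * (1 + scale) + shift. A minimal sketch of that step in isolation
# (toy shapes; names are illustrative, not diffusers API):
def _ada_norm_demo():
    norm = nn.LayerNorm(8, elementwise_affine=False)
    cond = torch.randn(2, 16)              # e.g. a timestep embedding
    to_scale_shift = nn.Linear(16, 2 * 8)  # projects cond -> (scale, shift)
    scale, shift = to_scale_shift(cond).chunk(2, dim=-1)
    x = torch.randn(2, 4, 8)               # (batch, seq, dim)
    return norm(x) * (1 + scale[:, None]) + shift[:, None]  # broadcast over seq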
| 116 | 0 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
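
# For intuition: in the binary case the MCC above reduces to a closed form over
# the confusion-matrix counts. A minimal pure-Python sketch (no sklearn) that
# agrees with the metric on small inputs; illustrative only:
def _binary_mcc(references, predictions):
    import math

    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    tn = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 0)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0


# _binary_mcc([1, 0, 1, 1, 0], [1, 0, 0, 1, 1]) == (2*1 - 1*1) / sqrt(3*3*2*2) ~= 0.167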
| 362 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
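
# The `_LazyModule` swap above defers heavy framework imports until an
# attribute is first accessed. A minimal self-contained sketch of the same
# idea (toy mapping onto stdlib modules; illustrative, not the transformers
# implementation):
def _lazy_module_demo():
    import importlib
    import types

    class TinyLazyModule(types.ModuleType):
        def __init__(self, name, name_to_module):
            super().__init__(name)
            self._name_to_module = name_to_module  # attr -> defining module

        def __getattr__(self, attr):
            module = importlib.import_module(self._name_to_module[attr])
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache so __getattr__ won't fire again
            return value

    lazy = TinyLazyModule("demo", {"dumps": "json", "sqrt": "math"})
    assert lazy.dumps({"ok": True}) == '{"ok": true}'  # json imported only here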
| 227 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
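
# How the blocks above fit together: each down block appends its intermediate
# activations to an `output_states` tuple, and the matching up block pops from
# `res_hidden_states_tuple` and concatenates along the channel axis before each
# resnet -- the classic UNet skip connection. A shape-only sketch (toy sizes,
# NHWC layout as used by these Flax blocks; illustrative only):
def _skip_connection_demo():
    h = jnp.zeros((1, 32, 32, 64))
    skips = (jnp.zeros((1, 32, 32, 64)),)  # what a down block would emit
    res = skips[-1]                        # the up block pops the newest skip
    skips = skips[:-1]
    h = jnp.concatenate((h, res), axis=-1)  # channels double before the resnet
    assert h.shape == (1, 32, 32, 128)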
| 3 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 84 | 0 |
"""simple docstring"""
import math


def is_prime(number: int) -> bool:
    """Determine primality using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"""{solution() = }""")
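
# Quick sanity check of the 6k +/- 1 sieve against a brute-force primality
# definition (pure Python; cheap enough to run at import time):
def _is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))


assert all(is_prime(n) == _is_prime_naive(n) for n in range(-5, 200))
assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13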
| 233 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
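
# The training loop above implements manual gradient accumulation: the loss is
# divided by `gradient_accumulation_steps` and the optimizer only steps every
# N micro-batches, so gradients from N batches are averaged before each update.
# A minimal sketch of the pattern in isolation (toy model; illustrative only):
def _grad_accumulation_demo():
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    accumulation_steps = 4
    for step in range(16):
        x, y = torch.randn(8, 4), torch.randn(8, 1)
        loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
        loss.backward()  # grads accumulate in .grad across micro-batches
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()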
| 233 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
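
# Worked example of the normalization above: LayoutLM-style models expect box
# coordinates on a fixed 0-1000 grid, so a word box (left, top, right, bottom)
# of (100, 50, 300, 100) on a 1000 x 500 pixel page maps as follows (a quick
# sanity check, not part of the original module):
assert normalize_box([100, 50, 300, 100], 1000, 500) == [100, 100, 300, 200]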
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 204 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 369 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 217 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 38 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg( alpha , x , y , max_iterations=70000 ):
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 100 == 0:
            print(F'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
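# Editor's illustrative sketch (not from the original file, hedged): the same
# gradient step as logistic_reg, run on a hand-made 4-point dataset so the
# update rule can be checked in isolation from the iris demo above.
def _toy_gradient_check():
    x_toy = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    y_toy = np.array([0, 0, 0, 1])
    w = np.zeros(x_toy.shape[1])
    for _ in range(1000):
        h = sigmoid_function(np.dot(x_toy, w))
        w -= 0.5 * np.dot(x_toy.T, h - y_toy) / y_toy.size  # same gradient step
    return w  # the learned weights favour the positive [1, 1] example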
| 139 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['pixel_values']
def __init__( self , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = PIL.Image.BICUBIC , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = 1 / 2_5_5 , __lowerCamelCase = True , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
_SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(__lowerCamelCase , param_name="crop_size" )
_SCREAMING_SNAKE_CASE : str = do_resize
_SCREAMING_SNAKE_CASE : Optional[Any] = size
_SCREAMING_SNAKE_CASE : str = resample
_SCREAMING_SNAKE_CASE : Union[str, Any] = do_center_crop
_SCREAMING_SNAKE_CASE : Dict = crop_size
_SCREAMING_SNAKE_CASE : Tuple = do_rescale
_SCREAMING_SNAKE_CASE : Dict = rescale_factor
_SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
_SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = PIL.Image.BICUBIC , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
__lowerCamelCase , size=(size["height"], size["width"]) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> Tuple:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = ChannelDimension.FIRST , **__lowerCamelCase , ) -> PIL.Image.Image:
_SCREAMING_SNAKE_CASE : List[str] = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE : Tuple = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE : List[str] = size if size is not None else self.size
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__lowerCamelCase , param_name="crop_size" )
_SCREAMING_SNAKE_CASE : Tuple = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE : int = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE : Dict = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE : Dict = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE : Optional[int] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
_SCREAMING_SNAKE_CASE : str = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
_SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) | 325 |
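# Editor's illustrative sketch for the image processor above (hedged; values
# and shapes are made up): preprocess() applies resize -> center_crop ->
# rescale -> normalize. The numpy lines below mirror only the last two steps.
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
rescaled = image * (1 / 255)                           # do_rescale, rescale_factor=1/255
mean, std = np.array([0.5] * 3), np.array([0.5] * 3)   # IMAGENET_STANDARD mean/std
normalized = (rescaled - mean) / std                   # do_normalize
print(normalized.min() >= -1.0 and normalized.max() <= 1.0)  # True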
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
        return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
            if isinstance(self.scheduler , DDIMScheduler ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase ) | 325 | 1 |
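# Editor's illustrative sketch (hedged): the static method above is spherical
# linear interpolation (slerp). The same formula in plain numpy, for reference.
import numpy as np

def slerp_sketch(x0, x1, alpha):
    # theta is the angle between the two (flattened) tensors
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

print(slerp_sketch(np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.5))  # [0.7071 0.7071]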
from math import ceil
def assert_device_map( device_map , num_blocks ):
    '''simple docstring'''
    blocks = list(range(0 , num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks))
def get_device_map( n_layers , devices ):
    '''simple docstring'''
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks)]
    return dict(zip(devices , layers_list))
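# Editor's usage sketch for get_device_map above: splitting 12 attention
# blocks evenly across two devices (output verified by hand).
if __name__ == "__main__":
    print(get_device_map(12, [0, 1]))
    # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}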
| 248 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase: Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def UpperCamelCase_ (self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        """simple docstring"""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self , images , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def UpperCamelCase_ (self , image , prompt=None ):
        """simple docstring"""
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def UpperCamelCase_ (self , model_inputs , generate_kwargs=None ):
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , list )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def UpperCamelCase_ (self , model_outputs ):
        """simple docstring"""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
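# Editor's usage sketch (hedged; the checkpoint name and image URL are public
# examples and require network access):
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text" , model="ydshieh/vit-gpt2-coco-en")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))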
| 227 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params, i, prefix ):
    return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup( params, i, prefix, layer_name="attention" ):
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params, i, prefix, split_mlp_wi=False ):
    if split_mlp_wi:
        wi_a = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_b = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params, i, prefix, layer_name ):
    return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch( variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False ):
    old = traverse_util.flatten_dict(variables['target'] )
    old = {'/'.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm' )
        k, o, q, v = tax_attention_lookup(old, i, 'encoder', 'attention' )
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm' )
        wi, wo = tax_mlp_lookup(old, i, 'encoder', split_mlp_wi )
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"""encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
                old, i, 'encoder' ).T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'encoder' ).T
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'decoder' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'self_attention' )
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention' )
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm' )
            wi, wo = tax_mlp_lookup(old, i, 'decoder', split_mlp_wi )
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"""decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(old, i, 'decoder' ).T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict( converted_params, is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta( model, config, tax_checkpoint_path, is_encoder_only, scalable_attention ):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted, is_encoder_only )
    model.load_state_dict(state_dict, strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False, ):
    config = MTaConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('Done' )
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
_snake_case : List[str] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
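# Editor's illustrative sketch of the reshape done by tax_attention_lookup
# above (hedged; shapes are made up): per-head k/q/v kernels are flattened to
# (d_model, n_heads * head_dim), and the out projection collapses its first
# two axes instead.
import numpy as np

k_tmp = np.zeros((8, 2, 4))  # (d_model, heads, head_dim)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
o_tmp = np.zeros((2, 4, 8))  # (heads, head_dim, d_model)
o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
print(k.shape, o.shape)      # (8, 8) (8, 8)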
| 207 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , lowerCAmelCase_ : int = 1_6 , lowerCAmelCase_ : int = 8_8 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : str = "geglu" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , ) -> Dict:
super().__init__()
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = attention_head_dim
__lowerCAmelCase = num_attention_heads * attention_head_dim
__lowerCAmelCase = in_channels
__lowerCAmelCase = torch.nn.GroupNorm(num_groups=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , eps=1e-6 , affine=lowerCAmelCase_ )
__lowerCAmelCase = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ )
# 3. Define transformers blocks
__lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dropout=lowerCAmelCase_ , cross_attention_dim=lowerCAmelCase_ , activation_fn=lowerCAmelCase_ , attention_bias=lowerCAmelCase_ , double_self_attention=lowerCAmelCase_ , norm_elementwise_affine=lowerCAmelCase_ , )
for d in range(lowerCAmelCase_ )
] )
__lowerCAmelCase = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : bool = True , ) -> str:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape
__lowerCAmelCase = batch_frames // num_frames
__lowerCAmelCase = hidden_states
__lowerCAmelCase = hidden_states[None, :].reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowerCAmelCase = self.norm(lowerCAmelCase_ )
__lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = self.proj_in(lowerCAmelCase_ )
# 2. Blocks
for block in self.transformer_blocks:
__lowerCAmelCase = block(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , timestep=lowerCAmelCase_ , cross_attention_kwargs=lowerCAmelCase_ , class_labels=lowerCAmelCase_ , )
# 3. Output
__lowerCAmelCase = self.proj_out(lowerCAmelCase_ )
__lowerCAmelCase = (
hidden_states[None, None, :]
.reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowerCAmelCase = hidden_states.reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=lowerCAmelCase_ )
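# Editor's illustrative sketch of the shape bookkeeping in forward() above
# (hedged; sizes are made up): frames arrive folded into the batch axis and
# are re-exposed so attention runs over the time dimension.
import torch

batch, frames, channels, height, width = 2, 4, 8, 16, 16
hidden_states = torch.randn(batch * frames, channels, height, width)
x = hidden_states[None, :].reshape(batch, frames, channels, height, width)
x = x.permute(0, 2, 1, 3, 4)                                   # (B, C, F, H, W)
x = x.permute(0, 3, 4, 2, 1).reshape(batch * height * width, frames, channels)
print(x.shape)  # torch.Size([512, 4, 8]) -> tokens along the frame axis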
| 207 | 1 |
def move_tower( height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( fp , tp ):
    print("""moving disk from""" , fp , """to""" , tp )
def main():
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
    main() | 233 |
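# Editor's note on the Hanoi recursion above: moving n disks takes 2**n - 1
# moves. A minimal self-contained check of that recurrence:
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1  # two recursive sub-towers plus one disk

assert count_moves(3) == 2**3 - 1 == 7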
# flake8: noqa
# Lint as: python3
lowerCamelCase : Optional[Any] = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 233 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
__UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
__UpperCAmelCase = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__UpperCAmelCase = model.state_dict()
__UpperCAmelCase = compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
__UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
    __UpperCAmelCase = std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__UpperCAmelCase = state_dict['''cls.predictions.decoder.weight''']
__UpperCAmelCase = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
__UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint) | 228 |
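# Editor's note: the loop above distills a 12-layer BERT teacher into a
# 6-layer student by copying the listed teacher layers. The index mapping,
# spelled out as a self-contained sketch:
teacher_layers = [0, 2, 4, 7, 9, 11]
student_to_teacher = dict(enumerate(teacher_layers))
print(student_to_teacher)  # {0: 0, 1: 2, 2: 4, 3: 7, 4: 9, 5: 11}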
'''simple docstring'''
def solution(n: int = 10) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""") | 228 | 1 |
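# Editor's note on the solution above: Python's three-argument pow performs
# modular exponentiation in O(log e) multiplications, so 2**7830457 is never
# materialised in full. Equivalent one-liner (same ten digits):
print(str((28433 * pow(2, 7830457, 10**10) + 1) % 10**10))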
'''simple docstring'''
def multiply( a , b ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def multiply_mod( a , b , c ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
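# Editor's usage sketch for the two helpers above (double-and-add, i.e.
# Russian peasant multiplication; the names are as fixed in this snippet):
if __name__ == "__main__":
    assert multiply(13, 11) == 143
    assert multiply_mod(13, 11, 7) == 143 % 7 == 3
    print("binary multiplication checks passed")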
| 80 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
__A = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Any = RobertaTokenizer
    def __init__( self : Optional[int] , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , )-> Optional[int]:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
@property
def lowercase_ ( self : List[str])-> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
    def lowercase_ ( self : Tuple , value : Optional[int])-> Optional[Any]:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def lowercase_ ( self : Any , *args : str , **kwargs : List[Any])-> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs)
    def lowercase_ ( self : int , *args : Dict , **kwargs : List[str])-> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs)
    def lowercase_ ( self : Any , save_directory : str , filename_prefix : Optional[str] = None)-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def lowercase_ ( self : Dict , token_ids_a : Any , token_ids_b : Union[str, Any]=None)-> Optional[Any]:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def lowercase_ ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None)-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
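# Editor's usage sketch (hedged; downloads "roberta-base" from the Hub). The
# `is_split_into_words` path requires add_prefix_space=True, matching the
# assert in _batch_encode_plus above; `snake_case` is the class defined here.
if __name__ == "__main__":
    tok = snake_case.from_pretrained("roberta-base" , add_prefix_space=True)
    print(tok(["Hello", "world"] , is_split_into_words=True).input_ids)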
| 217 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
UpperCAmelCase = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359 | """simple docstring"""
def euclidean_distance_sqr( point1 , point2 ) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ) -> list:
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('''inf''' ) ) -> float:
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('''inf''' ) ) -> float:
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ) -> float:
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("""Distance:""" , closest_pair_of_points(points , len(points )))
| 54 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( nn.Module ):
def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
__lowercase = ACTaFN[activation] if activation is not None else nn.Identity()
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any:
"""simple docstring"""
super().__init__()
__lowercase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__lowercase = config.num_channels
def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__lowercase = self.embedder(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
super().__init__()
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
__lowercase = nn.Sequential(
nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , )
def a__ ( self : str , _UpperCAmelCase : Dict ) -> str:
"""simple docstring"""
__lowercase = self.pooler(_UpperCAmelCase )
__lowercase = self.attention(_UpperCAmelCase )
__lowercase = hidden_state * attention
return hidden_state
class RegNetXLayer ( nn.Module ):
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 ):
        """simple docstring"""
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]
    def forward( self , hidden_state ):
        """simple docstring"""
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetYLayer ( nn.Module ):
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 ):
        """simple docstring"""
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]
    def forward( self , hidden_state ):
        """simple docstring"""
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetStage ( nn.Module ):
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 2 , depth: int = 2 , ):
        """simple docstring"""
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )
    def forward( self , hidden_state ):
        """simple docstring"""
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder ( nn.Module ):
    def __init__( self , config: RegNetConfig ):
        """simple docstring"""
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )
    def forward( self , hidden_state: Tensor , output_hidden_states: bool = False , return_dict: bool = True ) -> BaseModelOutputWithNoAttention:
        """simple docstring"""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
class RegNetPreTrainedModel ( PreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        """simple docstring"""
        if isinstance(module , RegNetModel ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel ( RegNetPreTrainedModel ):
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values: Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification ( RegNetPreTrainedModel ):
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values: Optional[torch.FloatTensor] = None , labels: Optional[torch.LongTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 325 | 1 |
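# A hedged smoke test for the reconstructed RegNet modules above. It assumes the
# module-level doc constants (_CHECKPOINT_FOR_DOC etc.) from the file header are in
# scope and that `transformers` is installed; the tiny config sizes are placeholders.
import torch
from transformers import RegNetConfig

config = RegNetConfig(
    num_channels=3, embedding_size=32, hidden_sizes=[64, 128], depths=[1, 1], layer_type="y"
)
model = RegNetModel(config).eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 64, 64))
print(outputs.last_hidden_state.shape)  # channel dim equals hidden_sizes[-1]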
'''simple docstring'''
def prefix_function(input_string: str ) -> list:
    '''simple docstring'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str ) -> int:
    '''simple docstring'''
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 363 |
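# Quick check of the two helpers above on a hypothetical test string.
print(prefix_function("aabaaab"))  # [0, 1, 0, 1, 2, 2, 3]
print(longest_prefix("aabaaab"))   # 3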
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main() | 219 | 0 |
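# A hedged in-process invocation of main() above, faking the command line; this
# assumes `diffusers` is installed so EnvironmentCommand can report its environment.
import sys
sys.argv = ["diffusers-cli", "env"]
main()  # prints environment details via the registered `env` subcommand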
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/xprophetnet-large-wiki100-cased': 5_12,
}
def load_vocab(vocab_file ):
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
        for i in range(10 ):
            tok = F"""[unused{i}]"""
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self, d ):
        '''simple docstring'''
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self, text: str ):
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str )
    def _convert_token_to_id( self, token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self, index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self, tokens ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE, ''' ''' ).strip()
        return out_string
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 207 |
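# A hedged usage sketch of the tokenizer above: loading the referenced checkpoint
# needs network access plus the `sentencepiece` package; ids end with [SEP] per
# build_inputs_with_special_tokens.
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tokenizer("Hello world").input_ids
print(ids[-1] == tokenizer.sep_token_id)  # True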
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests ( PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D'''), up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D'''), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''gelu''', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs( self, device, seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def test_text_to_video_default_case( self ):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3E-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1E-2 )
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        pass
    def test_progress_bar( self ):
        '''simple docstring'''
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests ( unittest.TestCase ):
    """simple docstring"""
    def test_full_model( self ):
        '''simple docstring'''
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
    def test_two_step_model( self ):
        '''simple docstring'''
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 207 | 1 |
from __future__ import annotations
def bucket_sort(my_list: list ) -> list:
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 201 |
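# Floats also work: values land in integer-width buckets via int(value - min_value)
# and are sorted within each bucket.
print(bucket_sort([0.4, 1.2, 0.1, 3.3, 1.0]))  # [0.1, 0.4, 1.0, 1.2, 3.3]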
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__( self , keywords: list[str] ):
        '''simple docstring'''
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state: int , char: str ):
        '''simple docstring'''
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword: str ):
        '''simple docstring'''
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        '''simple docstring'''
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in( self , string: str ):
        '''simple docstring'''
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 | 1 |
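# Example: searching two keywords at once; values are start offsets in the text.
auto = Automaton(["he", "she"])
print(auto.search_in("shehe"))  # {'she': [0], 'he': [1, 3]}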
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput ( BaseOutput ):
    sample: torch.FloatTensor
class UNet1DModel ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , sample_size: int = 6_5536 , sample_rate: Optional[int] = None , in_channels: int = 2 , out_channels: int = 2 , extra_in_channels: int = 0 , time_embedding_type: str = "fourier" , flip_sin_to_cos: bool = True , use_timestep_embedding: bool = False , freq_shift: float = 0.0 , down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type: str = "UNetMidBlock1D" , out_block_type: str = None , block_out_channels: Tuple[int] = (32, 32, 64) , act_fn: str = None , norm_num_groups: int = 8 , layers_per_block: int = 1 , downsample_each_block: bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , return_dict: bool = True , ):
        '''simple docstring'''
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample )
| 228 |
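# A self-contained sketch of the timestep handling in forward() above: the embedding
# gains a length dimension, is tiled over the sequence, and matches the sample dtype.
import torch

def broadcast_timestep_embed(timestep_embed: torch.Tensor, sample: torch.Tensor) -> torch.Tensor:
    # (batch, embed_dim) -> (batch, embed_dim, length), matching the sample tensor.
    timestep_embed = timestep_embed[..., None]                     # add a length dim
    timestep_embed = timestep_embed.repeat(1, 1, sample.shape[2])  # tile over length
    return timestep_embed.to(sample.dtype)

sample = torch.randn(4, 2, 128)
embed = torch.randn(4, 32)
print(broadcast_timestep_embed(embed, sample).shape)  # torch.Size([4, 32, 128])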
def hex_to_bin(hex_num: str ) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228 | 1 |
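# Quick checks: 0xAC == 172 == 0b10101100, and the sign is preserved.
print(hex_to_bin("AC"))   # 10101100
print(hex_to_bin("-ff"))  # -11111111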
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace ):
    """simple docstring"""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        add_new_model_parser = parser.add_parser("""add-new-model""" )
        add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
        add_new_model_parser.add_argument("""--testing_file""" , type=str , help="""Configuration file on which to run.""" )
        add_new_model_parser.add_argument(
            """--path""" , type=str , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing: bool , testing_file: str , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
        """simple docstring"""
        warnings.warn(
            """The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
            """It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
            """checks, you should use `transformers-cli add-new-model-like` instead.""" )
        if not _has_cookiecutter:
            raise ImportError(
                """Model creation dependencies are required to use the `add_new_model` command. Install them by running """
                """the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
        if len(directories ) > 0:
            raise ValueError(
                """Several directories starting with `cookiecutter-template-` in current working directory. """
                """Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
                """change your working directory.""" )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / """templates""" / """adding_a_new_model"""
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , """r""" ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration["""lowercase_modelname"""]
        generate_tensorflow_pytorch_and_flax = configuration["""generate_tensorflow_pytorch_and_flax"""]
        os.remove(F'{directory}/configuration.json' )
        output_pytorch = """PyTorch""" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
        output_flax = """Flax""" in generate_tensorflow_pytorch_and_flax
        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=True )
        # Tests require submodules as they have parent imports
        with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , """w""" ):
            pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path ):
            with open(path , """r""" ) as f:
                lines = f.readlines()
            with open(path , """w""" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , """w""" ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("""\"""" )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("""\"""" )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory )
| 138 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput ( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas, dtype=torch.float32 )
class UnCLIPScheduler ( SchedulerMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , num_train_timesteps: int = 10_00 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1e-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output , timestep , sample , prev_timestep=None , generator=None , return_dict: bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                """ for the UnCLIPScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    """ for the UnCLIPScheduler.""" )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 138 | 1 |
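# For reference, steps 4-5 in step() above implement the DDPM posterior mean,
# formula (7) of https://arxiv.org/pdf/2006.11239.pdf:
#   mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                  + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
# with posterior variance beta_tilde_t = ((1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * beta_t,
# matching pred_original_sample_coeff, current_sample_coeff, and _get_variance.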
def solution(num: int = 600851475143 ) -> int:
    '''simple docstring'''
    try:
        n = int(num )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7 |
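# Sanity checks: 13195 = 5 * 7 * 13 * 29, and the well-known Project Euler #3 answer.
print(solution(13195))  # 29
print(solution())       # 6857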
"""simple docstring"""
import math
import random
def sigmoid_function(value , deriv = False ):
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected , number_propagations ):
    '''simple docstring'''
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1 , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
| 54 | 0 |
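# Non-interactive example: with enough propagations the single-weight network above
# converges toward the expected value; the random initial weight keeps the result
# approximate (between 31 and 33 here).
random.seed(0)
print(forward_propagation(32, 450_000))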
def solution(num: int = 60_08_51_47_51_43 ) -> int:
    try:
        n = int(num )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(F"{solution() = }")
| 262 |
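# This variant stops trial division at sqrt(n), so prime inputs are cheap, unlike
# the first solution above which counts all the way up to the factor.
print(solution(13195))  # 29
print(solution(17))     # 17 (a prime is its own largest prime factor)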
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowercase ( _UpperCAmelCase = "isbn/0140328726" ) -> dict:
lowerCamelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCamelCase =F"""{olid} is not a valid Open Library olid"""
raise ValueError(_UpperCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowercase ( _UpperCAmelCase ) -> dict:
lowerCamelCase ={
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowerCamelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCamelCase =[
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowerCamelCase =data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =""", """.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase__ : List[str] =input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
UpperCAmelCase__ : Dict =summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 262 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray ,max_length: float ,sample_rate: int = 1_60_00 ) -> np.ndarray:
    """simple docstring"""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 ,len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None ,metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_file: Optional[str] = field(
        default=None ,metadata={"help": "A file containing the training audio paths and labels."} )
    eval_file: Optional[str] = field(
        default=None ,metadata={"help": "A file containing the validation audio paths and labels."} )
    train_split_name: str = field(
        default="train" ,metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } ,)
    eval_split_name: str = field(
        default="validation" ,metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } ,)
    audio_column_name: str = field(
        default="audio" ,metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} ,)
    label_column_name: str = field(
        default="label" ,metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
    max_train_samples: Optional[int] = field(
        default=None ,metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } ,)
    max_eval_samples: Optional[int] = field(
        default=None ,metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } ,)
    max_length_seconds: float = field(
        default=20 ,metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} ,)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,)
    config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None ,metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
    model_revision: str = field(
        default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
    feature_extractor_name: Optional[str] = field(
        default=None ,metadata={"help": "Name or path of preprocessor config."} )
    freeze_feature_encoder: bool = field(
        default=True ,metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
    attention_mask: bool = field(
        default=True ,metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
    use_auth_token: bool = field(
        default=False ,metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } ,)
    freeze_feature_extractor: Optional[bool] = field(
        default=None ,metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    ignore_mismatched_sizes: bool = field(
        default=False ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
    def __post_init__( self ):
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""" )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
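# --- A quick sanity check for `random_subsample`, added as an illustrative sketch
# (not part of the original script); the 16 kHz rate and clip lengths are arbitrary.
_wav = np.zeros(5 * 16_000, dtype=np.float32)  # 5 seconds of silence
_clip = random_subsample(_wav, max_length=3.0, sample_rate=16_000)
assert len(_clip) == 3 * 16_000  # clips longer than max_length are cut down
_short = np.zeros(16_000, dtype=np.float32)
assert random_subsample(_short, max_length=3.0, sample_rate=16_000) is _short  # short clips pass through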
| 306 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
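# --- A usage sketch (hypothetical parameter tree; leaf values are placeholders):
# every leaf must be matched by one of the rules above, otherwise the assert fires.
example_params = {
    "transformer": {
        "wte": {"embedding": 0},
        "wpe": {"embedding": 0},
    },
    "ln_f": {"bias": 0, "scale": 0},
}
example_spec = set_partitions(example_params)
# `example_spec` is a FrozenDict mirroring the tree, with P("mp", None) at the
# embedding leaves and None at the layer-norm leaves.
print(example_spec)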
| 219 | 0 |
'''simple docstring'''
import pytest
_lowerCamelCase = """__dummy_dataset1__"""
_lowerCamelCase = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
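# --- A hypothetical test consuming the fixture above (an illustrative sketch; it
# needs network access, since the generated dummy script downloads from the Hub):
def test_dummy_dataset_loads(dataset_loading_script_dir):
    from datasets import load_dataset

    # pytest injects the directory containing the generated __dummy_dataset1__.py
    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names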
| 364 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
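# --- A sketch of the behaviour exercised in the last test, outside the harness:
# prefer `max_new_tokens` (length of the continuation alone) over `max_length`
# (prompt + continuation); setting both triggers the "Both `max_new_tokens`" warning.
if __name__ == "__main__":
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    print(generator("Hello world", max_new_tokens=5)[0]["generated_text"])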
| 249 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence of 0s, 1s and 2s in a single pass (Dijkstra's three-way partition).
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
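# --- A quick worked example (values must come from `colors`, i.e. 0, 1 and 2):
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []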
| 201 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers that add up to `target`.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
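# --- Note: the two-pointer scan above assumes `nums` is sorted in ascending order.
# A few illustrative checks:
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]  # 2 + 7 == 9
assert two_pointer([1, 3, 3], 6) == [1, 2]  # duplicate values are fine
assert two_pointer([2, 7, 11, 15], 100) == []  # no pair sums to the target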
| 201 | 1 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """
    Classic fourth-order Runge-Kutta integration of y' = f(x, y)
    from x0 to x_end with step size h and initial value y(x0) = y0.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
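# --- A worked example (a sketch): integrating y' = y from x = 0 with y(0) = 1
# should approximate e at x = 1; RK4's global error is O(h^4).
def _exp_rhs(x, y):
    return y

_ys = runge_kutta(_exp_rhs, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
assert abs(_ys[-1] - np.e) < 1e-5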
| 106 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 106 | 1 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True if the string is a valid dotted-quad IPv4 address: four octets, each 0-255.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
print(F'{ip} is a {valid_or_invalid} IP v4 address.')
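# --- A few quick checks (a sketch):
assert is_ip_v4_address_valid("192.168.0.23")
assert is_ip_v4_address_valid("255.255.255.255")  # 255 is a legal octet value
assert not is_ip_v4_address_valid("262.301.360.936")  # octets above 255
assert not is_ip_v4_address_valid("1.2.3")  # too few octets
assert not is_ip_v4_address_valid("1.2.3.four")  # non-numeric octet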
| 138 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates the euclidean distance between two vectors.
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """
    For every vector in `value_array`, find the nearest vector in `dataset`
    and return it together with its euclidean distance.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates the cosine similarity between two vectors.
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
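# --- A usage sketch with toy data: for each query vector, `similarity_search`
# returns the nearest dataset vector and its euclidean distance.
_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
_values = np.array([[0.0, 1.0]])
print(similarity_search(_dataset, _values))  # [[[0.0, 0.0], 1.0]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0 (parallel vectors)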
| 138 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
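# --- The numerical-equivalence idiom used above (max absolute difference, then
# `torch.allclose`) is useful on its own; a minimal standalone sketch:
def assert_outputs_close(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> None:
    # Report the worst-case elementwise deviation, then fail loudly past `atol`.
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    if not torch.allclose(ours, theirs, atol=atol):
        raise Exception("Tensors diverge beyond tolerance")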
| 159 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
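# --- A small sketch instantiating the config and its ONNX helper:
if __name__ == "__main__":
    config = IBertConfig(quant_mode=True)
    print(config.model_type)  # "ibert"
    onnx_config = IBertOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes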
| 159 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCAmelCase : Any =logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float32):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
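# --- A usage sketch with random audio (an illustrative example using the class
# defaults: "fusion" truncation and "repeatpad" padding):
if __name__ == "__main__":
    _audio = np.random.randn(2 * 48_000)  # 2 s at 48 kHz, shorter than the 10 s window
    _extractor = ClapFeatureExtractor()
    _features = _extractor(_audio, sampling_rate=48_000, return_tensors="np")
    print(_features["input_features"].shape)  # (1, 4, frames, 64): four stacked mel views
    # note: with "fusion", one clip per batch is flagged "longer" even when none exceed 10 s
    print(_features["is_longer"])
| 262 |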
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
            ],
        )
| 262 | 1 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    '''simple docstring'''
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
        raise
 | 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
 | 340 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_60_47
RO_CODE = 25_61_45
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = self.tokenizer_class.from_pretrained(A_ ,**A_ )
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(A_ )
A = tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(A_ ,A_ )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(A_ )
A = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ ,A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=True
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(A_ ,legacy_format=A_ )
A = tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files
self.assertSequenceEqual(A_ ,A_ )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(A_ )
A = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ ,A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=False
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(A_ ,legacy_format=A_ )
A = tokenizer_p.save_pretrained(A_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(A_ )
A = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ ,A_ ) )
shutil.rmtree(A_ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
if not self.test_seqaseq:
return
A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
A = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
A = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
A = tokenizer.prepare_seqaseq_batch(
src_texts=A_ ,tgt_texts=A_ ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ,)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,10 )
# max_target_length will default to max_length if not specified
A = tokenizer.prepare_seqaseq_batch(
A_ ,tgt_texts=A_ ,max_length=3 ,return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,3 )
A = tokenizer.prepare_seqaseq_batch(
src_texts=A_ ,max_length=3 ,max_target_length=10 ,return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 )
self.assertNotIn('decoder_input_ids' ,A_ )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = [AddedToken('<special>' ,lstrip=A_ )]
A = self.rust_tokenizer_class.from_pretrained(
A_ ,additional_special_tokens=A_ ,**A_ )
A = tokenizer_r.encode('Hey this is a <special> token' )
A = tokenizer_r.encode('<special>' ,add_special_tokens=A_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A = self.rust_tokenizer_class.from_pretrained(
A_ ,additional_special_tokens=A_ ,**A_ ,)
A = self.tokenizer_class.from_pretrained(
A_ ,additional_special_tokens=A_ ,**A_ )
A = tokenizer_p.encode('Hey this is a <special> token' )
A = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(A_ ,A_ )
self.assertEqual(A_ ,A_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
return cls
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] ,25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] ,25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] ,25_6057 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
self.assertIn(A_ ,self.tokenizer.all_special_ids )
# fmt: off
A = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
A = self.tokenizer.decode(A_ ,skip_special_tokens=A_ )
A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=A_ )
self.assertEqual(A_ ,A_ )
self.assertNotIn(self.tokenizer.eos_token ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
A = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] ,A_ )
A = 10
A = self.tokenizer(A_ ,max_length=A_ ,truncation=A_ ).input_ids[0]
self.assertEqual(ids[-1] ,2 )
self.assertEqual(ids[0] ,A_ )
self.assertEqual(len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[25_6203, 3] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = tempfile.mkdtemp()
A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A_ )
A = NllbTokenizer.from_pretrained(A_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,A_ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=A_ ,truncation=A_ ,max_length=len(self.expected_src_tokens ) ,return_tensors='pt' ,)
A = shift_tokens_right(
batch['labels'] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(A_ ,A_ )
self.assertEqual((2, 15) ,batch.input_ids.shape )
self.assertEqual((2, 15) ,batch.attention_mask.shape )
A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,A_ )
self.assertEqual(A_ ,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = self.tokenizer(self.src_text ,padding=A_ ,truncation=A_ ,max_length=3 ,return_tensors='pt' )
A = self.tokenizer(
text_target=self.tgt_text ,padding=A_ ,truncation=A_ ,max_length=10 ,return_tensors='pt' )
A = targets['input_ids']
A = shift_tokens_right(
A_ ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
A = self.tokenizer._build_translation_inputs(
'A test' ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(A_ ) ,{
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} ,)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = True
A = self.tokenizer(
'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids ,[1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
A = False
A = self.tokenizer(
'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' )
self.assertEqual(
            inputs.input_ids, [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2]
        )
 | 74 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 249 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
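
# Hedged quick check (not part of the original file): with the helpers above,
#   rotate_90(make_matrix(2)) == [[2, 4], [1, 3]]
# i.e. a counterclockwise quarter turn of [[1, 2], [3, 4]].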
| 335 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        """simple docstring"""
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
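
# Hedged smoke test (not part of the original file): toy dimensions only, so the
# shapes below follow directly from the projections defined above.
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=16, cross_attention_dim=8
    )
    hidden_states, time_embeddings = proj(
        image_embeddings=torch.randn(2, 32),
        prompt_embeds=torch.randn(2, 32),
        text_encoder_hidden_states=torch.randn(2, 7, 32),
        do_classifier_free_guidance=False,
    )
    print(hidden_states.shape, time_embeddings.shape)  # torch.Size([2, 11, 8]) torch.Size([2, 16])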
| 335 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
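
# Hedged usage sketch (not part of the original file): preprocess a dummy
# 8-frame video; the output layout is (num_videos, num_frames, C, H, W).
if __name__ == "__main__":
    processor = VivitImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)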
| 106 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
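
# Hedged CLI usage (based on the parser defined above): `accelerate test`
# dispatches here and launches test_script.py under the chosen config, e.g.
#   accelerate test --config_file path/to/default_config.yaml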
| 106 | 1 |
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    '''simple docstring'''
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
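
# Hedged usage (not part of the original file):
if __name__ == "__main__":
    print(word_break("applepie", ["apple", "pie"]))  # True
    print(word_break("applepie", ["pie"]))  # False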
| 195 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    """simple docstring"""

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
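
# Hedged wiring sketch (not part of the original file): a randomly initialized
# toy CLIPConfig is used purely to exercise the forward pass; real pipelines
# load a trained safety checker checkpoint instead.
if __name__ == "__main__":
    config = CLIPConfig()
    checker = IFSafetyChecker(config)
    images = np.random.rand(1, 64, 64, 3)
    clip_input = torch.randn(1, 3, config.vision_config.image_size, config.vision_config.image_size)
    images, nsfw, watermark = checker(clip_input, images)
    print(nsfw, watermark)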
| 195 | 1 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
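
# Hedged worked examples (values follow the ENERGY_CONVERSION table above):
#   energy_conversion("joule", "kilojoule", 1)        -> 0.001
#   energy_conversion("kilowatthour", "joule", 0.5)   -> 1800000.0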
| 159 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
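
# Hedged CLI usage via python-fire (the script filename is hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path overwrites src_path in place, as handled above.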
| 159 | 1 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 207 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 207 | 1 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    '''simple docstring'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
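
# Hedged check: the Project Euler #3 sample gives solution(13195) == 29
# (13195 = 5 * 7 * 13 * 29); the default input returns the largest prime
# factor of 600851475143.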
| 57 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record even-sized subtrees in ``cuts``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run DFS from the root; afterwards ``cuts`` holds every vertex whose subtree has even size."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)  # prints 2 for the sample tree above
| 340 | 0 |
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> Any:
'''simple docstring'''
try:
lowercase = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
lowercase = 2
lowercase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase = i
while n % i == 0:
lowercase = n // i
i += 1
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 351 |
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        rebuilt_1 = tokenizer.decode(tokens_1)
        rebuilt_2 = tokenizer.decode(tokens_2)
        rebuilt_3 = tokenizer.decode(tokens_3)
        self.assertEqual(rebuilt_1, expected_text)
        self.assertEqual(rebuilt_2, expected_text)
        self.assertEqual(rebuilt_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        tokens_1 = tokenizer.encode("あンいワ")
        tokens_2 = tokenizer.encode("", prefix_text="あンいワ")
        tokens_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(tokens_1), tokenizer.decode(tokens_2))
        self.assertEqual(tokenizer.decode(tokens_1), tokenizer.decode(tokens_3))
        self.assertNotEqual(tokens_1, tokens_2)
        self.assertNotEqual(tokens_1, tokens_3)
        self.assertEqual(tokens_2[1], tokens_2[-1])  # SEG token
        self.assertEqual(tokens_2[1], tokens_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        input_ids_expected = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attn_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, input_ids_expected)
        self.assertListEqual(x_token.token_type_ids, token_type_ids_expected)
        self.assertListEqual(x_token.attention_mask, attn_mask_expected)
        self.assertListEqual(x_token_2.input_ids, input_ids_expected)
        self.assertListEqual(x_token_2.token_type_ids, token_type_ids_expected)
        self.assertListEqual(x_token_2.attention_mask, attn_mask_expected)

    def test_conversion_reversible(self):
        # Japanese-specific character fluctuations (e.g. 㔺界 -> 世界) make exact round-trips impossible here
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
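# Sketch of the prefix-input behaviour exercised by the slow tests above
# (downloads the "Tanrei/GPTSAN-japanese" checkpoint named in this file):
#
#     from transformers import GPTSanJapaneseTokenizer
#
#     tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     encoded = tokenizer("いワ", prefix_text="あン")
#     encoded.input_ids       # prefix tokens first, separated from the text by a SEG token
#     encoded.token_type_ids  # 1 marks the prefix segment, 0 the text segment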
| 32 | 0 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 400_0000 ):
A__ = [0, 1]
A__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A__ = 0
for j in range(len(UpperCAmelCase_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
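# A leaner variant (sketch): every third Fibonacci number is even, and the even
# terms satisfy E(k) = 4 * E(k-1) + E(k-2), so the sum can skip the odd terms entirely.
def solution_even_only(limit: int = 4000000) -> int:
    """Sum the even Fibonacci numbers not exceeding ``limit``."""
    a, b = 2, 8  # first two even Fibonacci numbers
    total = 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total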
| 335 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
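# Quick check of the helpers above:
if __name__ == "__main__":
    print(are_collinear((1, 2, 3), (2, 3, 4), (3, 4, 5)))  # True: the points sit on one line
    print(are_collinear((1, 2, 3), (2, 3, 4), (3, 4, 6)))  # False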
| 335 | 1 |
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
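# Usage sketch (requires bs4). The expected nodes/xpaths follow from the xpath
# construction above:
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
#     encoding = feature_extractor(html_string)
#     encoding["nodes"]   # [['Title', 'Hello world']]
#     encoding["xpaths"]  # [['/html/body/h1', '/html/body/p']]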
| 366 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Configuration for how dataset files are downloaded, cached, and extracted."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
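# Sketch: typical construction plus the deep-copying ``copy`` method above.
#
#     config = DownloadConfig(cache_dir="~/.cache/my_datasets", max_retries=3)
#     config_copy = config.copy()  # independent deep copy of every field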
| 184 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 195 |
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that records every event it receives."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
        assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
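# Minimal sketch of a user-defined callback like the ones these tests exercise
# (the model and dataset are assumed to exist; names are placeholders):
#
#     class LogEveryHundredSteps(TrainerCallback):
#         def on_step_end(self, args, state, control, **kwargs):
#             if state.global_step % 100 == 0:
#                 print(f"step {state.global_step}")
#
#     trainer = Trainer(model, TrainingArguments("out"), train_dataset=dataset,
#                       callbacks=[LogEveryHundredSteps()])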
| 195 | 1 |
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer that attends across the frame axis of a video latent."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: fold the frame axis out of the batch so attention runs over frames
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
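# Shape sanity-check sketch for the model above (dimensions chosen so that
# num_attention_heads * attention_head_dim == in_channels; values are illustrative):
if __name__ == "__main__":
    model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=256)
    frames = torch.randn(2 * 4, 256, 8, 8)  # (batch * num_frames, channels, height, width)
    out = model(frames, num_frames=4).sample
    print(out.shape)  # torch.Size([8, 256, 8, 8]) -- the residual connection keeps the shape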
| 360 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule from a cumulative product function ``alpha_bar``."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
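# Quick look at the cosine schedule defined above (illustrative):
#
#     betas = betas_for_alpha_bar(10)
#     betas.shape  # torch.Size([10]); values start small and grow toward max_beta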
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverted DDIM: maps a clean sample back toward noise (x_t-1 -> x_t)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
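# Sketch of an inversion loop with the scheduler above. The zero tensor stands in
# for a UNet's predicted noise; a real DDIM inversion would use unet(sample, t).sample.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 4, 64, 64)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # placeholder for the model call
        sample = scheduler.step(noise_pred, t, sample).prev_sample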
| 158 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
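# Usage sketch (downloads the "vinai/bartpho-syllable" files referenced above):
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.").input_ids
#     tokenizer.decode(ids)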
| 207 |
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
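# Usage sketch; the checkpoint id is an assumption (any BLIP checkpoint whose
# tokenizer is a BertTokenizer works with the processor above):
#
#     from PIL import Image
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=Image.open("photo.png"), text="a photo of", return_tensors="pt")
#     sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']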
| 207 | 1 |
def all_chars_unique(input_str: str) -> bool:
    """Return True if no character occurs twice, using an int as an unbounded bitmap.

    >>> all_chars_unique("abcdef")
    True
    >>> all_chars_unique("hello")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
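# For contrast, a set-based version of the same check. The bitmap above leans on
# Python's arbitrary-precision ints acting as an unbounded bitset over code points.
def all_chars_unique_set(input_str: str) -> bool:
    """Set-based equivalent of the bitmap check above.

    >>> all_chars_unique_set("abcdef")
    True
    >>> all_chars_unique_set("hello")
    False
    """
    seen = set()
    for ch in input_str:
        if ch in seen:
            return False
        seen.add(ch)
    return True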
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
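# Sketch: inspecting the dynamic axes the ONNX export would use for this model.
#
#     config = DistilBertConfig()
#     onnx_config = DistilBertOnnxConfig(config)
#     onnx_config.inputs  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...)])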
| 273 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a__ :
def __init__( self : Tuple , a : Optional[int] , a : Optional[Any]=13 , a : List[Any]=64 , a : str=2 , a : int=3 , a : Union[str, Any]=True , a : List[str]=True , a : str=32 , a : List[Any]=5 , a : int=4 , a : Optional[int]=37 , a : Any="gelu" , a : List[Any]=0.1 , a : Tuple=0.1 , a : int=10 , a : Optional[int]=0.02 , a : int=[1, 16, 4, 4] , a : Dict=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
__lowerCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__lowerCamelCase = (self.image_size // 32) ** 2
__lowerCamelCase = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=a , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Any , a : Optional[int] , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = ViTHybridModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Dict , a : Dict , a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = ViTHybridForImageClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =(ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCamelCase : Tuple =(
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : List[Any] =False
lowerCamelCase : Tuple =False
lowerCamelCase : List[str] =False
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = ViTHybridModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(a )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=a )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__lowerCamelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = ViTHybridModel.from_pretrained(a )
self.assertIsNotNone(a )
def __lowerCAmelCase ( ) -> Optional[int]:
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        """simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
    def test_accelerate_inference(self):
        """simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 67 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Hash source lines, ignoring comments and blank lines."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
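# For example, a pure comment line and a blank line do not affect the hash
# (illustrative values, not part of the original module):
#
#   _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])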
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULES_SUPPORTING_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
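# A minimal illustration (not part of the original module) of how these tables
# are typically consumed: pick the builder module matching a file's extension.
def _infer_module_for_data_files_sketch(filenames: List[str]):
    import os

    for filename in filenames:
        suffix = os.path.splitext(filename)[1]
        if suffix in _EXTENSION_TO_MODULE:
            return _EXTENSION_TO_MODULE[suffix]
    return None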
| 32 | 0 |
def solution(length: int = 50) -> int:
    """Count the arrangements of a row of the given length filled with grey unit
    squares and red blocks of length >= 3 (Project Euler problem 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
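# Sanity check from the Project Euler 114 problem statement: a row of
# length seven admits exactly seventeen arrangements.
assert solution(7) == 17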
if __name__ == "__main__":
print(f'''{solution() = }''')
| 351 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
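# Sketch of intended usage (hypothetical values; HfArgumentParser is not
# imported in this module):
#
#   parser = HfArgumentParser(SquadDataTrainingArguments)
#   (data_args,) = parser.parse_args_into_dataclasses(
#       args=["--model_type", "bert", "--data_dir", "./squad"]
#   )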
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, is_language_sensitive=False, cache_dir=None, dataset_format="pt", ):
        """simple docstring"""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""", )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
    def __len__(self):
        """simple docstring"""
        return len(self.features)
    def __getitem__(self, i):
        """simple docstring"""
        # Convert to tensors and build the inputs dict
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
| 102 | 0 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    return 2 * x
def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Square root approximated with Newton's method."""
    if a < 0:
        raise ValueError('''math domain error''')
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
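    # A couple of spot checks (a minimal sketch; `square_root_iterative` is a
    # reconstructed name for this snippet's garbled top-level function):
    assert abs(square_root_iterative(4.0) - 2.0) < 1e-9
    assert abs(square_root_iterative(2.0) - 2.0**0.5) < 1e-9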
| 34 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    """simple docstring"""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
| 184 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
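# For instance, with pad_token_id == 1 the attention mask zeroes out padding:
#   np.where(np.array([[5, 7, 1]]) != 1, 1, 0)  ->  array([[1, 1, 0]])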
class FlaxBlenderbotModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
__UpperCamelCase =20
__UpperCamelCase =model_class_name(A_ )
__UpperCamelCase =model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase =model.init_cache(decoder_input_ids.shape[0] , A_ , A_ )
__UpperCamelCase =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase =model.decode(
decoder_input_ids[:, :-1] , A_ , decoder_attention_mask=A_ , past_key_values=A_ , decoder_position_ids=A_ , )
__UpperCamelCase =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase =model.decode(
decoder_input_ids[:, -1:] , A_ , decoder_attention_mask=A_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A_ , )
__UpperCamelCase =model.decode(A_ , A_ )
__UpperCamelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
__UpperCamelCase =20
__UpperCamelCase =model_class_name(A_ )
__UpperCamelCase =model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase =model.init_cache(decoder_input_ids.shape[0] , A_ , A_ )
__UpperCamelCase =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase =model.decode(
decoder_input_ids[:, :-1] , A_ , decoder_attention_mask=A_ , past_key_values=A_ , decoder_position_ids=A_ , )
__UpperCamelCase =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase =model.decode(
decoder_input_ids[:, -1:] , A_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A_ , decoder_position_ids=A_ , )
__UpperCamelCase =model.decode(A_ , A_ , decoder_attention_mask=A_ )
__UpperCamelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """simple docstring"""
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def _a ( self ) -> Optional[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase =self._prepare_for_class(A_ , A_ )
__UpperCamelCase =model_class(A_ )
@jax.jit
def encode_jitted(A_ , A_=None , **A_ ):
return model.encode(input_ids=A_ , attention_mask=A_ )
with self.subTest('JIT Enabled' ):
__UpperCamelCase =encode_jitted(**A_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase =encode_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__UpperCamelCase ={
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(A_ , A_ , A_ ):
return model.decode(
decoder_input_ids=A_ , decoder_attention_mask=A_ , encoder_outputs=A_ , )
with self.subTest('JIT Enabled' ):
__UpperCamelCase =decode_jitted(**A_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase =decode_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def _a ( self ) -> Any:
__UpperCamelCase ={'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
__UpperCamelCase ={'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
__UpperCamelCase =FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=A_ )
__UpperCamelCase =BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
__UpperCamelCase =['Sam']
__UpperCamelCase =tokenizer(A_ , return_tensors='jax' )
__UpperCamelCase =model.generate(**A_ , **A_ )
__UpperCamelCase ='Sam is a great name. It means "sun" in Gaelic.'
__UpperCamelCase =tokenizer.batch_decode(A_ , **A_ )
assert generated_txt[0].strip() == tgt_text
| 117 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
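# A minimal sketch of how the deprecation surfaces to callers: instantiating
# the old class still works but emits a FutureWarning.
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       ImageGPTFeatureExtractor()
#       assert issubclass(caught[-1].category, FutureWarning)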
| 117 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = 'file'
@pytest.fixture(scope='session')
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT, 'utf-8')
    with zstd.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), 'w') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False])
@pytest.mark.parametrize('default_cache_dir', [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', custom_extracted_dir)
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f'''tmp://{tmpfs_file}''')
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('https://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        http_get('https://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('https://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('ftp://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('ftp://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('s3://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('s3://huggingface.co')
| 98 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]
def integrand(x: float, z: float) -> float:
    '''simple docstring'''
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
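    # Spot checks against known values (a sketch; `gamma`/`integrand` are
    # reconstructed names for this snippet's garbled identifiers):
    assert abs(gamma(5.0) - 24.0) < 1e-6  # Gamma(5) = 4! = 24
    assert abs(gamma(3.0) - 2.0) < 1e-6  # Gamma(3) = 2! = 2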
| 158 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
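# `bin_exp_mod(a, d, n)` is expected to compute a**d mod n by binary
# exponentiation (repeated squaring). A rough sketch of an equivalent, in case
# the sibling module is unavailable (an assumption, not the original
# implementation -- Python's built-in pow(a, d, n) also works):
#
#   def bin_exp_mod(a, d, n):
#       res, a = 1, a % n
#       while d > 0:
#           if d & 1:
#               res = res * a % n
#           a = a * a % n
#           d >>= 1
#       return res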
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test, useful for big numbers."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i))) | 362 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__UpperCamelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        return
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason="UperNet does not have tied weights" )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg")
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_swin_backbone(self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4))
    def test_inference_convnext_backbone(self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)) | 171 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A : List[Any] = NewType("DataClass", Any)
__A : Optional[Any] = NewType("DataClassType", Any)
def string_to_bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs, ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
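# Sketch of intended usage (hypothetical dataclass, not part of this module):
#
#   @dataclasses.dataclass
#   class TrainingConfig:
#       lr: float = HfArg(default=1e-3, help="learning rate", aliases=["--learning-rate"])
#
#   parser = HfArgumentParser(TrainingConfig)
#   (cfg,) = parser.parse_args_into_dataclasses(args=["--lr", "0.01"])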
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types, **kwargs):
        '''simple docstring'''
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field):
        '''simple docstring'''
        field_name = F"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''')
        aliases = kwargs.pop('''aliases''', [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, '''__origin__''', field.type)
        if origin_type is Union or (hasattr(types, '''UnionType''') and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F""" Problem encountered in field '{field.name}'.""")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '''__origin__''', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '''__origin__''', field.type)
            # A variable to store kwargs for a boolean field, if needed
            # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''])
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F"""--no_{field.name}""", action='''store_false''', dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        '''simple docstring'''
        if hasattr(dtype, '''_argument_group_name'''):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''')
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '''.'''.join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''') from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
def _lowercase ( self , _A=None , _A=False , _A=True , _A=None , _A=None , ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCAmelCase = []
if args_filename:
args_files.append(Path(_A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCAmelCase = ArgumentParser()
args_file_parser.add_argument(_A , type=_A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCAmelCase , UpperCAmelCase = args_file_parser.parse_known_args(args=_A )
UpperCAmelCase = vars(_A ).get(args_file_flag.lstrip('''-''' ) , _A )
if cmd_args_file_paths:
args_files.extend([Path(_A ) for p in cmd_args_file_paths] )
UpperCAmelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCAmelCase , UpperCAmelCase = self.parse_known_args(args=_A )
UpperCAmelCase = []
for dtype in self.dataclass_types:
UpperCAmelCase = {f.name for f in dataclasses.fields(_A ) if f.init}
UpperCAmelCase = {k: v for k, v in vars(_A ).items() if k in keys}
for k in keys:
delattr(_A , _A )
UpperCAmelCase = dtype(**_A )
outputs.append(_A )
if len(namespace.__dict__ ) > 0:
                # leftover attributes form an additional namespace output.
outputs.append(_A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _lowercase ( self , _A , _A = False ):
'''simple docstring'''
UpperCAmelCase = set(args.keys() )
UpperCAmelCase = []
for dtype in self.dataclass_types:
UpperCAmelCase = {f.name for f in dataclasses.fields(_A ) if f.init}
UpperCAmelCase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCAmelCase = dtype(**_A )
outputs.append(_A )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(_A )}""" )
return tuple(_A )
def _lowercase ( self , _A , _A = False ):
'''simple docstring'''
with open(Path(_A ) , encoding='''utf-8''' ) as open_json_file:
UpperCAmelCase = json.loads(open_json_file.read() )
UpperCAmelCase = self.parse_dict(_A , allow_extra_keys=_A )
return tuple(_A )
def _lowercase ( self , _A , _A = False ):
'''simple docstring'''
UpperCAmelCase = self.parse_dict(yaml.safe_load(Path(_A ).read_text() ) , allow_extra_keys=_A )
return tuple(_A )
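# Hedged usage sketch for the parser implemented above (the error strings
# identify it as transformers' HfArgumentParser). The dataclass and the flag
# passed are hypothetical, chosen only to exercise the boolean `--no_*`
# complement registered for True-default bool fields above.
import dataclasses
from transformers import HfArgumentParser

@dataclasses.dataclass
class ExampleArgs:
    learning_rate: float = 3e-4   # becomes --learning_rate with this default
    fp16: bool = True             # a True default also registers --no_fp16

example_parser = HfArgumentParser(ExampleArgs)
(parsed,) = example_parser.parse_args_into_dataclasses(args=["--no_fp16"])
assert parsed.fp16 is False and parsed.learning_rate == 3e-4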
| 273 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
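# Both tests above apply the same determinism recipe: run the pipeline, save it
# with save_pretrained, reload with from_pretrained, rerun on identical inputs,
# and require the max absolute output difference to stay below 1e-4. A hedged,
# generic restatement (pipe and inputs are placeholders from get_dummy_inputs):
#     before = pipe(**inputs)[0]
#     with tempfile.TemporaryDirectory() as tmpdir:
#         pipe.save_pretrained(tmpdir)
#         reloaded = pipe.__class__.from_pretrained(tmpdir)
#     after = reloaded(**inputs)[0]
#     assert np.abs(to_np(before) - to_np(after)).max() < 1e-4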
| 273 | 1 |
"""simple docstring"""
lowerCamelCase__ = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 363 |
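# Hedged sketch of the typical training-loop entry point re-exported above;
# model, optimizer, dataloader, and loss are placeholders the caller supplies.
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     accelerator.backward(loss)   # replaces loss.backward()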
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase__ = """hf-internal-testing/tiny-random-bert"""
lowerCamelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
lowerCamelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_SCREAMING_SNAKE_CASE ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'refs' , 'main' ) ) as f:
__lowerCAmelCase : List[Any] = f.read()
self.assertEqual(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , 'snapshots' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
# File is cached at the same place the second time.
__lowerCAmelCase : Union[str, Any] = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Using a specific revision to test the full commit hash.
__lowerCAmelCase : Union[str, Any] = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , revision='9b8c223' )
self.assertEqual(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , 'snapshots' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'is not a valid model identifier' ):
__lowerCAmelCase : Optional[Any] = cached_file('tiny-random-bert' , _SCREAMING_SNAKE_CASE )
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'is not a valid git identifier' ):
__lowerCAmelCase : str = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , revision='aaaa' )
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'does not appear to have a file named' ):
__lowerCAmelCase : Optional[Any] = cached_file(_SCREAMING_SNAKE_CASE , 'conf' )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'does not appear to have a file named' ):
__lowerCAmelCase : Optional[int] = cached_file(_SCREAMING_SNAKE_CASE , 'conf' )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'refs' , 'main' ) ) as f:
__lowerCAmelCase : Tuple = f.read()
self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , '.no_exist' , _SCREAMING_SNAKE_CASE , 'conf' ) ) )
__lowerCAmelCase : List[Any] = cached_file(_SCREAMING_SNAKE_CASE , 'conf' , _raise_exceptions_for_missing_entries=_SCREAMING_SNAKE_CASE )
self.assertIsNone(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = cached_file(_SCREAMING_SNAKE_CASE , 'conf' , local_files_only=_SCREAMING_SNAKE_CASE , _raise_exceptions_for_missing_entries=_SCREAMING_SNAKE_CASE )
self.assertIsNone(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = mock.Mock()
__lowerCAmelCase : Tuple = 5_00
__lowerCAmelCase : List[Any] = {}
__lowerCAmelCase : Dict = HTTPError
__lowerCAmelCase : str = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
__lowerCAmelCase : Optional[Any] = cached_file(_SCREAMING_SNAKE_CASE , 'conf' , _raise_exceptions_for_connection_errors=_SCREAMING_SNAKE_CASE )
self.assertIsNone(_SCREAMING_SNAKE_CASE )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self ):
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _SCREAMING_SNAKE_CASE ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _SCREAMING_SNAKE_CASE ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , _SCREAMING_SNAKE_CASE )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , _SCREAMING_SNAKE_CASE , revision='ahaha' )
__lowerCAmelCase : Union[str, Any] = get_file_from_repo('bert-base-cased' , _SCREAMING_SNAKE_CASE )
        # The returned name is the cache path, which is not easy to test directly, so we load the content instead.
__lowerCAmelCase : List[Any] = json.loads(open(_SCREAMING_SNAKE_CASE , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 7_68 )
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : str = Path(_SCREAMING_SNAKE_CASE ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_SCREAMING_SNAKE_CASE , 'a.txt' ) , str(_SCREAMING_SNAKE_CASE ) )
self.assertIsNone(get_file_from_repo(_SCREAMING_SNAKE_CASE , 'b.txt' ) ) | 182 | 0 |
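# Hedged sketch of the hub cache layout the tests above traverse (directory
# names are taken from the assertions; only the overall shape is asserted, via
# refs/main and the snapshot path):
#     models--hf-internal-testing--tiny-random-bert/
#         blobs/                          content-addressed file bodies
#         refs/main                       text file holding the full commit sha
#         snapshots/<commit_sha>/<file>   per-revision view resolving into blobs/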
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , ) ->tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
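# Worked check of the mass-action law n * p == n_i**2 that the function above
# encodes; the numbers are illustrative. Given p = 2e17 and n_i = 1e10, the
# zeroed electron concentration resolves to n = n_i**2 / p, i.e. the
# ("electron_conc", 500.0) branch above.
n_i, p = 1e10, 2e17
assert abs(n_i**2 / p - 500.0) < 1e-6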
| 102 |
"""simple docstring"""
import numpy as np
def lowercase ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Union[str, Any] ) ->Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = int(np.ceil((x_end - xa) / h ) )
__snake_case : Dict = np.zeros((n + 1,) )
__snake_case : List[Any] = ya
__snake_case : int = xa
for k in range(_snake_case ):
__snake_case : Any = f(_snake_case , y[k] )
__snake_case : List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__snake_case : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__snake_case : Optional[int] = f(x + h , y[k] + h * ka )
__snake_case : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
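# Hedged sanity check for the routine above, which implements the classical
# fourth-order Runge-Kutta step
#     y[k+1] = y[k] + (h/6) * (k1 + 2*k2 + 2*k3 + k4).
# A self-contained re-implementation (not the obfuscated function itself)
# integrating dy/dx = y from y(0) = 1 should land near e at x = 1:
import math

def rk4(f, ya, xa, x_end, h):
    x, y = xa, ya
    while x < x_end - 1e-12:
        k1 = f(x, y)
        k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
        k4 = f(x + h, y + h * k3)
        y += (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y

assert abs(rk4(lambda x, y: y, 1.0, 0.0, 1.0, 0.01) - math.e) < 1e-6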
| 102 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
a__ : str = KandinskyImgaImgPipeline
a__ : Any = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a__ : int = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a__ : List[Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a__ : Union[str, Any] = False
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
UpperCAmelCase_= XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_= MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase_= MultilingualCLIP(lowercase_ )
UpperCAmelCase_= text_encoder.eval()
return text_encoder
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_= {
"""in_channels""": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase_= UNetaDConditionModel(**lowercase_ )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_= VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
UpperCAmelCase_= self.dummy_text_encoder
UpperCAmelCase_= self.dummy_tokenizer
UpperCAmelCase_= self.dummy_unet
UpperCAmelCase_= self.dummy_movq
UpperCAmelCase_= {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
UpperCAmelCase_= DDIMScheduler(**lowercase_ )
UpperCAmelCase_= {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any]=0 ) -> List[str]:
UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ )
# create init_image
UpperCAmelCase_= floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_= Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((256, 256) )
if str(lowercase_ ).startswith("""mps""" ):
UpperCAmelCase_= torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_= torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_= {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= self.pipeline_class(**lowercase_ )
UpperCAmelCase_= pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_= pipe(**self.get_dummy_inputs(lowercase_ ) )
UpperCAmelCase_= output.images
UpperCAmelCase_= pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
UpperCAmelCase_= load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
UpperCAmelCase_= load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCAmelCase_= """A red cartoon frog, 4k"""
UpperCAmelCase_= KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
UpperCAmelCase_= KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
UpperCAmelCase_= pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_= torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase_, UpperCAmelCase_= pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase_= pipeline(
lowercase_ , image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
UpperCAmelCase_= output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 356 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : pyspark.sql.DataFrame , __UpperCAmelCase : Optional[NamedSplit] = None , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "arrow" , **__UpperCAmelCase : str , ) -> Dict:
super().__init__(
split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase_= load_from_cache_file
UpperCAmelCase_= file_format
UpperCAmelCase_= Spark(
df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
UpperCAmelCase_= None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
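# Hedged usage sketch for the reader above (it backs `Dataset.from_spark` in
# `datasets`); the Spark session and dataframe are placeholders and running
# this requires a live Spark environment, so it is left commented.
#     from pyspark.sql import SparkSession
#     from datasets import Dataset
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = Dataset.from_spark(df)   # triggers download_and_prepare above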
| 277 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
snake_case__ : int = re.compile(r'\s+')
def _a ( lowerCamelCase: Tuple ) -> List[str]:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def _a ( lowerCamelCase: int ) -> Tuple:
'''simple docstring'''
__A = [len(lowerCamelCase ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(lowerCamelCase ), "line_max": max(lowerCamelCase )}
def _a ( lowerCamelCase: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__A = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def _a ( lowerCamelCase: Dict , lowerCamelCase: Optional[Any] ) -> Any:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def _a ( lowerCamelCase: Optional[Any] , lowerCamelCase: List[str]=5 ) -> Any:
'''simple docstring'''
__A = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
__A = example['''content'''].splitlines()
for _, line in zip(range(lowerCamelCase ) , lowerCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def _a ( lowerCamelCase: List[Any] , lowerCamelCase: List[str]=5 , lowerCamelCase: str=0.05 ) -> int:
'''simple docstring'''
__A = ['''unit tests''', '''test file''', '''configuration file''']
__A = example['''content'''].splitlines()
__A = 0
__A = 0
# first test
for _, line in zip(range(lowerCamelCase ) , lowerCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__A = example['''content'''].count('''\n''' )
__A = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def _a ( lowerCamelCase: List[str] ) -> int:
'''simple docstring'''
__A = ['''def ''', '''class ''', '''for ''', '''while ''']
__A = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def _a ( lowerCamelCase: Any , lowerCamelCase: Tuple=4 ) -> Optional[int]:
'''simple docstring'''
__A = example['''content'''].splitlines()
__A = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def _a ( lowerCamelCase: Any ) -> int:
'''simple docstring'''
__A = tokenizer(example['''content'''] , truncation=lowerCamelCase )['''input_ids''']
__A = len(example['''content'''] ) / len(lowerCamelCase )
return {"ratio": ratio}
def _a ( lowerCamelCase: Optional[Any] ) -> str:
'''simple docstring'''
__A = {}
results.update(get_hash(lowerCamelCase ) )
results.update(line_stats(lowerCamelCase ) )
results.update(alpha_stats(lowerCamelCase ) )
results.update(char_token_ratio(lowerCamelCase ) )
results.update(is_autogenerated(lowerCamelCase ) )
results.update(is_config_or_test(lowerCamelCase ) )
results.update(has_no_keywords(lowerCamelCase ) )
results.update(has_few_assignments(lowerCamelCase ) )
return results
def _a ( lowerCamelCase: int , lowerCamelCase: List[Any] , lowerCamelCase: Optional[int] ) -> str:
'''simple docstring'''
if not check_uniques(lowerCamelCase , lowerCamelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def _a ( lowerCamelCase: int ) -> int:
'''simple docstring'''
with open(lowerCamelCase , '''rb''' ) as f_in:
with gzip.open(str(lowerCamelCase ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase , lowerCamelCase )
os.unlink(lowerCamelCase )
# Settings
snake_case__ : Any = HfArgumentParser(PreprocessingArguments)
snake_case__ : Dict = parser.parse_args()
if args.num_workers is None:
snake_case__ : Optional[int] = multiprocessing.cpu_count()
snake_case__ : int = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
snake_case__ : Tuple = time.time()
snake_case__ : Optional[int] = load_dataset(args.dataset_name, split='train')
print(f'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
snake_case__ : Dict = time.time()
snake_case__ : Tuple = ds.map(preprocess, num_proc=args.num_workers)
print(f'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
snake_case__ : int = set(ds.unique('hash'))
snake_case__ : Union[str, Any] = len(uniques) / len(ds)
print(f'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
snake_case__ : Optional[Any] = time.time()
snake_case__ : Optional[Any] = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'Time to filter dataset: {time.time()-t_start:.2f}')
print(f'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
snake_case__ : Union[str, Any] = time.time()
snake_case__ , snake_case__ : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(f'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
snake_case__ : int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
snake_case__ : str = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
snake_case__ : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
snake_case__ : str = str(data_dir / f'file-{file_number+1:012}.json')
snake_case__ : Union[str, Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'Time to save dataset: {time.time()-t_start:.2f}')
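# Hedged miniature of the exact-deduplication step above: every document is
# hashed, `uniques` holds each hash once, and the filter keeps only the first
# row carrying a given hash, consuming it from the set (mirroring the
# get_hash/check_uniques pair). Toy data, not the real pipeline.
import hashlib

docs = ["def f(): pass", "def f(): pass", "print(1)"]
doc_hashes = [hashlib.md5(d.encode("utf-8")).hexdigest() for d in docs]
unique_hashes = set(doc_hashes)

kept = []
for doc, h in zip(docs, doc_hashes):
    if h in unique_hashes:      # first occurrence of this hash -> keep, consume
        unique_hashes.remove(h)
        kept.append(doc)

assert kept == ["def f(): pass", "print(1)"]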
| 117 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
snake_case__ : Optional[int] = parser.parse_args()
if args.model_type == "bert":
snake_case__ : Dict = BertForMaskedLM.from_pretrained(args.model_name)
snake_case__ : Union[str, Any] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
snake_case__ : Optional[int] = model.state_dict()
snake_case__ : List[Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
snake_case__ : Tuple = state_dict[f'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
snake_case__ : Optional[Any] = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
snake_case__ : int = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
snake_case__ : Union[str, Any] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
snake_case__ : Dict = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
snake_case__ : int = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
snake_case__ : int = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
snake_case__ : Optional[int] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
snake_case__ : Optional[Any] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
snake_case__ : List[str] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
snake_case__ : int = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
snake_case__ : Optional[int] = state_dict['cls.predictions.decoder.weight']
snake_case__ : str = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
snake_case__ : int = state_dict[f'cls.predictions.transform.dense.{w}']
snake_case__ : Optional[int] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
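# The loop above realizes a layer-selection distillation init: teacher layers
# (0, 2, 4, 7, 9, 11) of a 12-layer BERT are copied into student layers 0..5
# via std_idx. A hedged restatement of the index mapping (names hypothetical):
teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
# e.g. teacher key "bert.encoder.layer.7.attention.self.query.weight"
# lands at student layer 3:
assert teacher_to_student[7] == 3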
| 117 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Dict = {"vocab_file": "spiece.model"}
__UpperCamelCase : Optional[Any] = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__UpperCamelCase : List[Any] = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class __lowerCAmelCase ( __lowercase ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self :List[Any] , __magic_name__ :Dict , __magic_name__ :Dict=False , __magic_name__ :Union[str, Any]=False , __magic_name__ :Union[str, Any]=False , __magic_name__ :int=None , __magic_name__ :Optional[Any]=None , __magic_name__ :Dict=None , __magic_name__ :List[Any]=None , __magic_name__ :List[Any] = None , **__magic_name__ :Any , ):
'''simple docstring'''
a = {} if sp_model_kwargs is None else sp_model_kwargs
a = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
a = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
a = '''<|endoftext|>''' if eos_token is None else eos_token
a = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
a = unk_token if pad_token is None else pad_token
a = eos_token if bos_token is None else bos_token
else:
a = '''<pad>''' if pad_token is None else pad_token
a = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
# Used for whitespace normalization in input texts
        # fmt: off
a = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
a = re.compile(
F'[{"".join(map(_a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self :Optional[Any] ):
'''simple docstring'''
a = self.__dict__.copy()
a = None
return state
def __setstate__( self :Tuple , __magic_name__ :Any ):
'''simple docstring'''
a = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :int ):
'''simple docstring'''
a = self.non_printing_characters_re.sub("""""" , _a )
# Normalize whitespaces
a = ''''''.join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
a = unicodedata.normalize("""NFC""" , _a )
return text
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Optional[int] , **__magic_name__ :List[str] ):
'''simple docstring'''
a = self.preprocess_text(_a )
return self.sp_model.encode(_a , out_type=_a )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int ):
'''simple docstring'''
return self.sp_model.PieceToId(_a )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(_a )
@staticmethod
def lowerCamelCase__ ( __magic_name__ :int ):
'''simple docstring'''
return out_string
def lowerCamelCase__ ( self :str , __magic_name__ :str ):
'''simple docstring'''
a = []
a = ''''''
a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
a = True
a = []
else:
current_sub_tokens.append(_a )
a = False
out_string += self.sp_model.decode(_a )
return out_string
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self :int , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def lowerCamelCase__ ( self :str , __magic_name__ :List[Any] , __magic_name__ :Tuple = False ):
'''simple docstring'''
if isinstance(_a , _a ):
a = self.preprocess_text(_a )
a = self.sp_model.encode(_a )
else:
a = [self.preprocess_text(_a ) for t in text]
a = self.sp_model.encode(_a )
if return_tensors is True or return_tensors == "pt":
a = torch.tensor(_a )
return token_ids
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
return self.sp_model.decode(_a )
def lowerCamelCase__ ( self :Dict , __magic_name__ :Dict ):
'''simple docstring'''
a = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
a = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(_a ) + F'{self.bos_token}Bot:'
)
return self.encode(text=_a )
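# Hedged round-trip sketch for the tokenizer above (transformers'
# GPTSw3Tokenizer, per the checkpoint map); running it needs network access
# and the sentencepiece model, so it is left commented.
#     from transformers import GPTSw3Tokenizer
#     tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tok.encode("Träd är fina", return_tensors="pt")  # the encode() above can emit torch tensors
#     print(tok.decode(ids.tolist()))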
| 365 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCamelCase )
a = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
a = dataset_size < in_memory_max_size
else:
a = False
a = is_small_dataset(__lowerCamelCase )
assert result == expected
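# The invariant exercised above, restated as a hedged one-liner (not library
# code): a dataset counts as "small" iff a positive max size is configured and
# the dataset size is strictly below it.
def is_small(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size) and dataset_size < in_memory_max_size

assert is_small(400 * 2**20, 900 * 2**20) is True
assert is_small(600 * 2**20, 0) is False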
| 347 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : Dict , A : Optional[int]=7 , A : Tuple=3 , A : Optional[Any]=10 , A : int=18 , A : Dict=30 , A : List[str]=400 , A : int=True , A : Optional[Any]=None , A : Optional[Any]=True , A : List[Any]=[0.5, 0.5, 0.5] , A : List[str]=[0.5, 0.5, 0.5] , A : Optional[int]=None , ):
_UpperCAmelCase : Dict = size if size is not None else {"shortest_edge": 18}
_UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : Optional[Any] = num_frames
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : Any = max_resolution
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : str = size
_UpperCAmelCase : List[Any] = do_normalize
_UpperCAmelCase : Any = image_mean
_UpperCAmelCase : Tuple = image_std
_UpperCAmelCase : Any = crop_size
def _A ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = VivitImageProcessor if is_vision_available() else None
def _A ( self : int ):
_UpperCAmelCase : Tuple = VivitImageProcessingTester(self )
@property
def _A ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "image_mean" ) )
self.assertTrue(hasattr(A , "image_std" ) )
self.assertTrue(hasattr(A , "do_normalize" ) )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "do_center_crop" ) )
self.assertTrue(hasattr(A , "size" ) )
def _A ( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Tuple ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 31 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=99 , _lowerCamelCase=13 , _lowerCamelCase=16 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=2 , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=30 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=None , ):
"""simple docstring"""
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Union[str, Any] = decoder_seq_length
# For common tests
UpperCAmelCase__ : int = self.decoder_seq_length
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : Optional[Any] = use_attention_mask
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : List[Any] = d_model
UpperCAmelCase__ : List[str] = d_model
UpperCAmelCase__ : Dict = decoder_layers
UpperCAmelCase__ : Any = decoder_layers
UpperCAmelCase__ : Tuple = decoder_ffn_dim
UpperCAmelCase__ : Any = decoder_attention_heads
UpperCAmelCase__ : List[str] = decoder_attention_heads
UpperCAmelCase__ : List[str] = eos_token_id
UpperCAmelCase__ : int = bos_token_id
UpperCAmelCase__ : Optional[int] = pad_token_id
UpperCAmelCase__ : Any = decoder_start_token_id
UpperCAmelCase__ : Dict = use_cache
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Dict = decoder_seq_length
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : List[str] = 1
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[str] = None
if self.use_attention_mask:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCAmelCase__ : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : str = TrOCRDecoder(config=_lowerCamelCase ).to(_lowerCamelCase ).eval()
UpperCAmelCase__ : Any = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase , use_cache=_lowerCamelCase )
UpperCAmelCase__ : str = model(_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , use_cache=_lowerCamelCase )
self.parent.assertTrue(len(_lowerCamelCase ) == len(_lowerCamelCase ) )
self.parent.assertTrue(len(_lowerCamelCase ) == len(_lowerCamelCase ) + 1 )
UpperCAmelCase__ : List[Any] = outputs["""past_key_values"""]
        # create a hypothetical next token and extend next_input_ids with it
UpperCAmelCase__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to the existing input_ids
UpperCAmelCase__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : int = model(_lowerCamelCase )["""last_hidden_state"""]
UpperCAmelCase__ : Dict = model(_lowerCamelCase , past_key_values=_lowerCamelCase )["""last_hidden_state"""]
# select random slice
UpperCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : int = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
UpperCAmelCase__ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (TrOCRForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _a (self ):
"""simple docstring"""
pass
| 171 | 0 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive), in O(1) per query."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Whether some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
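# Example usage (values verified by hand):
#   ps = PrefixSum([1, 2, 3, 4])
#   ps.get_sum(1, 3)    # 9  -> 2 + 3 + 4
#   ps.contains_sum(5)  # True -> subarray [2, 3]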
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    func_regex = f'{4 * " "}def {test_name}('
    line_begin_regex = f'{8 * " "}{correct_line.split()[0]}'
    another_line_begin_regex = f'{16 * " "}{correct_line.split()[0]}'
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'{spaces * " "}{correct_line}')
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
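# Each line of `--correct_filename` is expected to look like (a hypothetical example):
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])
# i.e. file;class;test;replacement-line, matching the `line.split(";")` call above.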
| 73 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
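# Minimal usage sketch (class names as defined above):
#   config = RobertaPreLayerNormConfig(num_hidden_layers=6)
#   config.model_type         # "roberta-prelayernorm"
#   config.num_hidden_layers  # 6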
| 345 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
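# The round trip exercised above, in short:
#   SplitDict._from_yaml_list(split_dict._to_yaml_list()) == split_dict
# with `dataset_name` dropped from the YAML form because it is deprecated.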
| 182 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # dtypes are an assumption: the original integer-width literals were garbled
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample``s into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
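# Minimal usage sketch (the path and tokenizer are hypothetical):
#   tokenizer = ...  # e.g. a BERT tokenizer
#   dataset = HansDataset("path/to/hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
#   len(dataset), dataset.get_labels()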
| 157 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX inference session with a given provider (CPUExecutionProvider by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
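# Minimal usage sketch (the "model.onnx" path is hypothetical):
#   session = OnnxRuntimeModel.load_model("model.onnx")
#   wrapper = OnnxRuntimeModel(model=session)
#   outputs = wrapper(input_ids=np.ones((1, 8), dtype=np.int64))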
| 157 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
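# Example invocation (the script filename and checkpoints are illustrative, not prescribed):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint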
| 65 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 277 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
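# Minimal usage sketch (the file path is hypothetical); the builder below is what backs
# `load_dataset("parquet", ...)`:
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})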
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | 222 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """
    The Chudnovsky algorithm: each term of the series contributes roughly 14
    digits, hence the `precision / 14` iteration count below.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
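# Example (the exact trailing digit depends on Decimal rounding at the chosen precision):
#   pi(10)  ->  "3.14159265"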
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}") | 222 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
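# Typical application-side usage of the verbosity helpers exercised above:
#   import transformers
#   transformers.logging.set_verbosity_error()  # silence warnings
#   transformers.logging.set_verbosity_info()   # re-enable progress logging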
| 15 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 347 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating() | 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """First index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Index just past the last occurrence of `item`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
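# Quick sanity examples (values verified by hand):
#   bisect_left([0, 5, 7, 10, 15], 6)     # 2
#   bisect_right([0, 5, 7, 10, 15], 7)    # 3
#   binary_search([0, 5, 7, 10, 15], 10)  # 3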
| 33 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each frontier chases the other's most recently expanded node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00_085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_controlnet_img2img( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config( model_name ):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
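# A short note on the table above (a sketch of the convention, not part of the
# checkpoint format itself): width_coef multiplies channel counts and depth_coef
# multiplies block repeats relative to b0, following EfficientNet's compound
# scaling rule, while the input resolution grows alongside (224px for b0 up to
# 600px for b7).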
def prepare_img( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ):
    """simple docstring"""
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params( hf_params , tf_params , key_mapping ):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
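# Note on the permutations in replace_params (a sketch of the layout
# conventions): TF stores conv kernels as (H, W, in, out) while PyTorch expects
# (out, in, H, W), hence permute(3, 2, 0, 1). TF depthwise kernels are
# (H, W, in, multiplier) and map to PyTorch's (in, multiplier, H, W) via
# permute(2, 3, 0, 1); dense kernels only need a transpose.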
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="softmax" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        hub_model_name = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(hub_model_name )
        hf_model.push_to_hub(hub_model_name )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
    def __init__( self , base_distribution: Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    def __init__( self , in_features: int , args_dim: Dict[str, int] , domain_map: Callable[..., Tuple[torch.Tensor]] , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x: torch.Tensor ) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim: int = 1 ) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None , ) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        return 0.0
    def get_parameter_projection( self , in_features: int ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args: torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x: torch.Tensor ) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
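# A quick sanity check for the squareplus mapping above (a minimal sketch,
# assuming only torch): it is a smooth, strictly positive function, used to
# turn unconstrained network outputs into valid scale/df parameters.
#
#   x = torch.tensor([-3.0, 0.0, 3.0])
#   (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
#   # -> tensor([0.3028, 1.0000, 3.3028])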
class StudentTOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df: torch.Tensor , loc: torch.Tensor , scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc: torch.Tensor , scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count: torch.Tensor , logits: torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ) -> Distribution:
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None ) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
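# (Sketch) Shifting the logits by log(scale) above multiplies the mean of
# torch's NegativeBinomial -- total_count * exp(logits) -- by `scale`, which is
# the scaling property of the underlying Gamma mixing distribution that the
# comment refers to.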
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Any ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase : Any = 1
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Optional[int] = (32, 32)
__UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def _lowerCamelCase ( self: List[Any] ) -> str:
torch.manual_seed(0 )
        __UpperCAmelCase : int = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _lowerCamelCase ( self: List[str] ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self: int ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__lowerCamelCase )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
def extract(*__lowerCamelCase: List[str] , **__lowerCamelCase: Optional[int] ):
class _snake_case :
def __init__( self: List[str] ) -> int:
__UpperCAmelCase : Tuple = torch.ones([0] )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] ) -> int:
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def _lowerCamelCase ( self: Dict ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
__UpperCAmelCase : int = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.dummy_vae
__UpperCAmelCase : List[str] = self.dummy_text_encoder
__UpperCAmelCase : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__UpperCAmelCase : int = 77
__UpperCAmelCase : List[str] = self.dummy_image.to(__lowerCamelCase )
__UpperCAmelCase : Dict = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        __UpperCAmelCase : Optional[Any] = AltDiffusionImg2ImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
__UpperCAmelCase : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
__UpperCAmelCase : List[str] = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = "A painting of a squirrel eating a burger"
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__UpperCAmelCase : List[Any] = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , )
__UpperCAmelCase : List[Any] = output.images
__UpperCAmelCase : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__UpperCAmelCase : Tuple = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Tuple = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
__UpperCAmelCase : List[Any] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
__UpperCAmelCase : Any = self.dummy_vae
__UpperCAmelCase : Tuple = self.dummy_text_encoder
__UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__UpperCAmelCase : Dict = 77
__UpperCAmelCase : Tuple = self.dummy_image.to(__lowerCamelCase )
# put models in fp16
__UpperCAmelCase : Any = unet.half()
__UpperCAmelCase : Optional[Any] = vae.half()
__UpperCAmelCase : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
        __UpperCAmelCase : List[Any] = AltDiffusionImg2ImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
__UpperCAmelCase : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
__UpperCAmelCase : Any = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : int = "A painting of a squirrel eating a burger"
__UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
__UpperCAmelCase : Any = alt_pipe(
[prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCAmelCase : List[Any] = init_image.resize((7_60, 5_04) )
__UpperCAmelCase : Any = "BAAI/AltDiffusion"
        __UpperCAmelCase : Union[str, Any] = AltDiffusionImg2ImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase : str = "A fantasy landscape, trending on artstation"
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : List[Any] = output.images[0]
__UpperCAmelCase : str = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
__UpperCAmelCase : int = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCAmelCase : Union[str, Any] = init_image.resize((7_68, 5_12) )
__UpperCAmelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
__UpperCAmelCase : Union[str, Any] = "BAAI/AltDiffusion"
        __UpperCAmelCase : Optional[Any] = AltDiffusionImg2ImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase : List[Any] = "A fantasy landscape, trending on artstation"
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : str = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : str = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    # Compare the tensors with their names blanked out, then restore the names.
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( model_path ):
    '''simple docstring'''
    model_file_folder = os.path.dirname(model_path )
    model_file_name = os.path.basename(model_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:  # FLOAT: 4 bytes per element
                    mem_size *= 4
                elif dtype == 6:  # INT32: 4 bytes per element
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE: 8 bytes per element
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 10_24 / 10_24 / 10_24 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , new_model )
    onnx.save(model , new_model )
    return new_model
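# Example usage (a minimal sketch; the model path below is hypothetical):
#
#   new_path = remove_dup_initializers("models/encoder.onnx")
#   # writes "optimized_encoder.onnx" next to the input, with identical
#   # initializer tensors de-duplicated and node inputs rewired accordingly.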
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
SCREAMING_SNAKE_CASE_: List[str] =get_logger()
SCREAMING_SNAKE_CASE_: Optional[dict] =None
class __A ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
def __init__(self : List[Any] , __a : Optional[int]=None , __a : Any=None , **__a : Dict ):
super().__init__(features=__a )
import jax
from jaxlib.xla_client import Device
if isinstance(__a , __a ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__a )}, as `jaxlib.xla_extension.Device` """
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
UpperCAmelCase_ = device if isinstance(__a , __a ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
UpperCAmelCase_ = str(jax.devices()[0] )
UpperCAmelCase_ = jnp_array_kwargs
@staticmethod
def _lowercase ():
import jax
return {str(__a ): device for device in jax.devices()}
def _lowercase (self : str , __a : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__a , __a ) and column:
if all(
isinstance(__a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__a , axis=0 )
return column
def _lowercase (self : Any , __a : Optional[int] ):
import jax
import jax.numpy as jnp
if isinstance(__a , (str, bytes, type(__a )) ):
return value
elif isinstance(__a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
        default_dtype = {}
        if isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__a , PIL.Image.Image ):
UpperCAmelCase_ = np.asarray(__a )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__a , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase (self : int , __a : Any ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__a , "__array__" ) and not isinstance(__a , jax.Array ):
UpperCAmelCase_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__a , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] )
elif isinstance(__a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] )
return self._tensorize(__a )
def _lowercase (self : Union[str, Any] , __a : dict ):
return map_nested(self._recursive_tensorize , __a , map_list=__a )
def _lowercase (self : str , __a : pa.Table ):
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_row(__a )
UpperCAmelCase_ = self.python_features_decoder.decode_row(__a )
return self.recursive_tensorize(__a )
def _lowercase (self : Tuple , __a : pa.Table ):
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_column(__a )
UpperCAmelCase_ = self.python_features_decoder.decode_column(__a , pa_table.column_names[0] )
UpperCAmelCase_ = self.recursive_tensorize(__a )
UpperCAmelCase_ = self._consolidate(__a )
return column
def _lowercase (self : str , __a : pa.Table ):
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_batch(__a )
UpperCAmelCase_ = self.python_features_decoder.decode_batch(__a )
UpperCAmelCase_ = self.recursive_tensorize(__a )
for column_name in batch:
UpperCAmelCase_ = self._consolidate(batch[column_name] )
return batch
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest (TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__UpperCAmelCase ) , 1_004 )
def lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
snake_case_ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case_ = """I was born in 92000, and this is falsé."""
snake_case_ = tokenizer.encode(__UpperCAmelCase )
snake_case_ = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
snake_case_ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
snake_case_ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case_ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
snake_case_ = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = """I was born in 92000, and this is falsé."""
snake_case_ = tokenizer.tokenize(__UpperCAmelCase )
snake_case_ = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
snake_case_ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
snake_case_ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(__UpperCAmelCase )
snake_case_ = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case_ = [
"""Le transformeur est un modèle d\'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__UpperCAmelCase , )
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results( test_results ):
    """simple docstring"""
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
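# Example (a sketch with made-up numbers): for a pytest stats line such as
#   "== 4 failed, 120 passed in 0:02:17 =="
# handle_test_results returns failed=4, success=120, time_spent="0:02:17".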
def extract_first_line_failure( failures_short_lines ):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__( self ,title ,doc_test_results ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time( self ) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
@property
    def header( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def category_failures( self ) -> Dict:
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v ,dict )}
        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
            report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload( self ) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out( ):
        payload = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
        print('Sending the following payload' )
        print(json.dumps({'blocks': payload} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text='There was an issue running the tests.' ,blocks=payload ,)
    def post( self ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,blocks=self.payload ,text=text ,)
    def get_reply_blocks( self ,job_name ,job_link ,failures ,text ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
            failures_text += f'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() ,key=lambda t: t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = f'''*Num failures* :{len(job_result["failed"] )} \n'''
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job ,job_link ,failures ,text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text=f'''Results for {job}''' ,blocks=blocks ,thread_ts=self.thread_ts['ts'] ,)
                time.sleep(1 )
def get_job_links( ):
    """simple docstring"""
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
        return {}
def retrieve_artifact( name ):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(name , file )}.''' ) from e
    return _artifact
def retrieve_available_artifacts( ):
    """simple docstring"""
    class Artifact:
        def __init__( self ,name ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self ,path ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
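# What the scraper above relies on (an assumption about Google Scholar's
# current markup): each result sits in a div.gs_ri block whose div.gs_fl footer
# holds the action links, the third of which is the "Cited by N" anchor.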
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit(  # fit_generator is deprecated in TF2; fit accepts generators directly
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 323 | 1 |
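# Consistency note (a sketch, not a change to the script above): the generators
# rescale training pixels by 1/255, but the single prediction image is fed to the
# model unscaled, which skews inference. A preprocessing step that matches training:
import numpy as np
import tensorflow as tf

test_image = tf.keras.preprocessing.image.load_img(
    "dataset/single_prediction/image.png", target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image) / 255.0  # match the training rescale
test_image = np.expand_dims(test_image, axis=0)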
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the image processor is expected to produce under shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
def lowerCamelCase__( self :str ) -> int:
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A ,Image.Image )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = self.image_processor_tester.get_expected_values(_A ,batched=_A )
a__ = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCamelCase__( self :Any ) -> List[Any]:
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,np.ndarray )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = image_processing(_A ,return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(_A ,batched=_A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,torch.Tensor )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = image_processing(_A ,return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(_A ,batched=_A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def lowerCamelCase__( self :Optional[int] ) -> str:
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
a__ = json.loads(f.read() )
a__ = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
a__ = DetaImageProcessor()
a__ = image_processing(images=_A ,annotations=_A ,return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape ,_A )
a__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,_A ,atol=1E-4 ) )
# verify area
a__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,_A ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,_A )
a__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,_A ,atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,_A ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,_A ) )
# verify class_labels
a__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,_A ) )
# verify orig_size
a__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,_A ) )
# verify size
a__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,_A ) )
@slow
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
a__ = json.loads(f.read() )
a__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
a__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
a__ = DetaImageProcessor(format='coco_panoptic' )
a__ = image_processing(images=_A ,annotations=_A ,masks_path=_A ,return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape ,_A )
a__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,_A ,atol=1E-4 ) )
# verify area
a__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,_A ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,_A )
a__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,_A ,atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,_A ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,_A ) )
# verify class_labels
a__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,_A ) )
# verify masks
a__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,_A )
# verify orig_size
a__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,_A ) )
# verify size
a__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,_A ) )
| 240 |
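# Standalone sketch of the "shortest edge" arithmetic the test helper above checks:
# the shorter side is pinned to a target length and the other side is scaled to
# preserve the aspect ratio.
def shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640))  # --> (18, 24)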
from __future__ import annotations
import numpy as np
def relu(vector):
    """Rectified linear unit: element-wise maximum of 0 and the input."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 303 | 0 |
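# Companion sketch: the subgradient of ReLU used during backpropagation.
import numpy as np

def relu_derivative(vector):
    # 1 where the input is positive, 0 elsewhere (the kink at 0 is assigned 0 here)
    return (np.asarray(vector) > 0).astype(float)

print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]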
def bfs(graph, source, sink, parent):
    """Breadth-first search over the residual graph; returns True if sink is reachable."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant of Ford-Fulkerson: augment along BFS paths until none remain."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 319 |
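# Performance sketch: list.pop(0) in the BFS above is O(n) per dequeue;
# collections.deque gives O(1) pops from the left, which matters on large graphs.
from collections import deque

def bfs_deque(graph, source, sink, parent):
    visited = [False] * len(graph)
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for ind, capacity in enumerate(graph[u]):
            if not visited[ind] and capacity > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]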
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum of all amicable numbers below the limit (Project Euler style)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 319 | 1 |
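# Speed-up sketch: sum_of_divisors is evaluated up to three times per candidate in
# the solution above; caching avoids recomputing the divisor sum. isqrt keeps the
# arithmetic in integers.
from functools import lru_cache
from math import isqrt

@lru_cache(maxsize=None)
def d(n: int) -> int:
    total = sum(i + n // i for i in range(2, isqrt(n) + 1) if n % i == 0)
    if isqrt(n) ** 2 == n:
        total -= isqrt(n)  # a perfect-square root was counted twice
    return total + 1  # 1 divides every n; n itself is excluded

print(sum(i for i in range(2, 10000) if d(d(i)) == i and d(i) != i))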
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character so BPE never sees control bytes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
SCREAMING_SNAKE_CASE__ : str = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : int = json.load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ : List[str] = bytes_to_unicode()
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE__ : Dict = merges_handle.read().split("""\n""" )[1:-1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the adjacent pair with the lowest (best) merge rank
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + """\n""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
SCREAMING_SNAKE_CASE__ : str = token_index
writer.write(""" """.join(SCREAMING_SNAKE_CASE__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ : List[Any] = """ """ + text
return (text, kwargs)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> int:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ : Any = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 25 |
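# Tiny standalone BPE sketch (toy merge table, not the Blenderbot vocabulary):
# repeatedly merge the highest-ranked adjacent pair, as the `bpe` method above does.
def toy_bpe(token, ranks):
    word = list(token)
    while len(word) > 1:
        pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(word[i] + word[i + 1])
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word

print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # --> ['low', 'e', 'r']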
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 319 | 0 |
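# Usage sketch (hedged, assumes the transformers ONNX extra is installed and that
# the classes above correspond to transformers' ConvBertConfig / ConvBertOnnxConfig;
# the import path for ConvBertOnnxConfig is an assumption about the module layout):
from pathlib import Path
from transformers import AutoModel, AutoTokenizer, ConvBertConfig
from transformers.models.convbert.configuration_convbert import ConvBertOnnxConfig
from transformers.onnx import export

model_ckpt = "YituTech/conv-bert-base"
onnx_config = ConvBertOnnxConfig(ConvBertConfig.from_pretrained(model_ckpt))
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModel.from_pretrained(model_ckpt)
# The dynamic axes declared by `inputs` above let the exported graph accept
# variable batch and sequence lengths.
export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("convbert.onnx"))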
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    dataset_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['copies'], 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'], True)
| 364 |
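# Background sketch: deduplication like the above is typically built on MinHash
# similarity. Using the datasketch library (an assumption about the underlying
# implementation, shown for intuition only):
from datasketch import MinHash

def minhash_of(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf-8"))
    return m

a = minhash_of("a " * 20)
b = minhash_of("a " * 30)
print(a.jaccard(b))  # near 1.0: both documents contain only the token "a"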
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 222 | 0 |
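# Minimal sketch of the lazy-import pattern above, expressed with PEP 562's
# module-level __getattr__ instead of transformers' _LazyModule class: attributes
# are resolved on first access, so importing the package stays cheap.
import importlib

_import_structure = {"configuration_megatron_bert": ["MegatronBertConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")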
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 259 |
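# Standalone illustration of the key-renaming step above: applying (old, new)
# pairs to a plain dict, as rename_key does. The keys are illustrative only.
state_dict = {"network.0.0.depth_wise_conv.weight": 1, "head.weight": 2}
renames = [("network.0.0.depth_wise_conv.weight",
            "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight")]
for old, new in renames:
    state_dict[new] = state_dict.pop(old)
print(sorted(state_dict))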
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state
    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()
    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
alphas=lowercase_ ,betas=lowercase_ ,alphas_cumprod=lowercase_ ,)
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 106 | 0 |
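# Quick-check sketch for betas_for_alpha_bar: with the Glide cosine schedule, the
# betas grow toward the end of diffusion and the final value is capped at max_beta.
import math
import jax.numpy as jnp

def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

T = 10
betas = [min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999) for i in range(T)]
print(jnp.array(betas))  # increasing sequence, last entry capped at 0.999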
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
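# Self-contained sketch of what the helper above computes: collecting rows
# partition-by-partition with SPARK_PARTITION_ID() (requires a local Spark session).
import pyspark

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(6).repartition(2)
for part_id in range(df.rdd.getNumPartitions()):
    rows = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
    print(part_id, [row.asDict() for row in rows])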
| 371 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 21 | 0 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : str = "eval" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_eval_dataloader(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Any = self.compute_metrics
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE : Any = time.time()
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
lowerCamelCase_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
finally:
SCREAMING_SNAKE_CASE : int = compute_metrics
SCREAMING_SNAKE_CASE : List[Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE : Dict = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions )
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : List[str] = metrics.pop(lowerCamelCase_ )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(lowerCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase_ )
return metrics
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : str = "test" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(lowerCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : int = self.compute_metrics
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE : Any = time.time()
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
lowerCamelCase_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
finally:
SCREAMING_SNAKE_CASE : Tuple = compute_metrics
SCREAMING_SNAKE_CASE : int = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : str = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions , """predict""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = metrics.pop(lowerCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase_ )
| 323 |
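De-mangled, the snippet above is recognizable as the question-answering Trainer from the transformers examples. A condensed sketch of its evaluate() flow (variable names are reconstructed, not taken from the dataset row, and the should_log/should_save guards are omitted for brevity):

import math
import time

from transformers import Trainer
from transformers.trainer_utils import speed_metrics

class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation; it runs after post-processing instead.
        compute_metrics, self.compute_metrics = self.compute_metrics, None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None:
            predictions = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(predictions)
            # Prefix all keys with metric_key_prefix + '_'
            metrics = {
                k if k.startswith(f"{metric_key_prefix}_") else f"{metric_key_prefix}_{k}": v
                for k, v in metrics.items()
            }
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        self.log(metrics)
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics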
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 1 |
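The style-context snippet above prepares dummy inputs and init arguments for a FlaxAutoencoderKL test. Stitched together, the intended usage is roughly the following (this assumes diffusers' Flax model API, where init_weights builds the parameter tree from an RNG key):

import jax
from diffusers import FlaxAutoencoderKL

init_dict = {
    "block_out_channels": [32, 64],
    "in_channels": 3,
    "out_channels": 3,
    "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
    "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
    "latent_channels": 4,
}
model = FlaxAutoencoderKL(**init_dict)

prng_key = jax.random.PRNGKey(0)
sample = jax.random.uniform(prng_key, (4, 3, 32, 32))  # (batch, channels, height, width)
params = model.init_weights(prng_key)  # returns the initialized parameter tree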
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
__a =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__a =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a =CLIPTextModel(__snake_case )
__a =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a ='cpu' # ensure determinism for the device-dependent torch.Generator
__a =self.get_dummy_components()
__a =StableDiffusionLDMaDPipeline(**__snake_case )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb[0, -3:, -3:, -1]
__a =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__a =np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
__a =np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.get_dummy_components()
__a =StableDiffusionLDMaDPipeline(**__snake_case )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a =3 * [inputs['prompt']]
# forward
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb_slice_a[0, -3:, -3:, -1]
__a =depth_slice_a[0, -3:, -1]
__a =self.get_dummy_inputs(__snake_case )
__a =3 * [inputs.pop('prompt' )]
__a =ldmad_pipe.tokenizer(
__snake_case , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
__a =text_inputs['input_ids'].to(__snake_case )
__a =ldmad_pipe.text_encoder(__snake_case )[0]
__a =prompt_embeds
# forward
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb_slice_a[0, -3:, -3:, -1]
__a =depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a ='cpu' # ensure determinism for the device-dependent torch.Generator
__a =self.get_dummy_components()
__a =PNDMScheduler(skip_prk_steps=__snake_case )
__a =StableDiffusionLDMaDPipeline(**__snake_case )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a ='french fries'
__a =ldmad_pipe(**__snake_case , negative_prompt=__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb[0, -3:, -3:, -1]
__a =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__a =np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
__a =np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , __snake_case , __snake_case="cpu" , __snake_case=torch.floataa , __snake_case=0 ) -> Optional[int]:
'''simple docstring'''
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a =np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) )
__a =torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case )
__a ={
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb[0, -3:, -3:, -1].flatten()
__a =rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
__a =np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
__a =np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , __snake_case , __snake_case="cpu" , __snake_case=torch.floataa , __snake_case=0 ) -> Optional[int]:
'''simple docstring'''
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a =np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) )
__a =torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case )
__a ={
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =0.49_5586
__a =0.3379_5515
__a =112.4_8518
__a =98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =0.419_4127
__a =0.3537_5586
__a =0.563_8502
__a =0.3468_6103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 308 |
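The test class above exercises Intel's ldm3d pipelines, which return an RGB image and a depth map for each prompt. The slow-test path boils down to the following usage (checkpoint name, prompt, and parameters all come from the snippet; a CUDA device is assumed):

import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipe(
    "a photograph of an astronaut riding a horse",
    generator=generator,
    num_inference_steps=50,
    guidance_scale=7.5,
    output_type="numpy",
)
rgb, depth = output.rgb, output.depth  # rgb: (1, 512, 512, 3), depth: (1, 512, 512)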
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308 | 1 |
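The MobileViT snippet above is an instance of a common deprecation pattern: keep the old class importable, emit a warning on construction, and delegate everything to the replacement. Generically (the class names here are illustrative):

import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias that warns on construction and otherwise behaves like NewImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed in a future release. "
            "Please use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)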
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Any = (KDPMaDiscreteScheduler,)
UpperCamelCase_ : str = 10
def _snake_case ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Dict:
'''simple docstring'''
A: str = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : str ) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> Tuple:
'''simple docstring'''
A: Union[str, Any] = self.scheduler_classes[0]
A: int = self.get_scheduler_config(prediction_type='''v_prediction''' )
A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
A: Union[str, Any] = self.dummy_model()
A: Any = self.dummy_sample_deter * scheduler.init_noise_sigma
A: str = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
A: Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Dict = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Optional[int] = output.prev_sample
A: int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def _snake_case ( self : int ) -> Dict:
'''simple docstring'''
if torch_device == "mps":
return
A: List[Any] = self.scheduler_classes[0]
A: int = self.get_scheduler_config()
A: Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
A: str = self.dummy_model()
A: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
A: Dict = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
A: Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = output.prev_sample
A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def _snake_case ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
A: str = self.scheduler_classes[0]
A: Any = self.get_scheduler_config()
A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ )
A: str = self.dummy_model()
A: int = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A: List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = output.prev_sample
A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 319 |
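The scheduler tests above run diffusers' standard denoising loop; note that the dataset's name-mangling turned the digit in KDPM2DiscreteScheduler into a letter ("KDPMaDiscreteScheduler"). A minimal version of the loop, using the same deterministic stand-in model the shared scheduler tests use:

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

def dummy_model(sample: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # Stand-in for a trained UNet, as in the common scheduler test helpers.
    return sample * t / (t + 1)

sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = dummy_model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample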
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = 0 ) -> list:
A: Dict = length or len(__lowercase )
A: Dict = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
A , A: Tuple = list_data[i + 1], list_data[i]
A: Union[str, Any] = True
return list_data if not swapped else bubble_sort(__lowercase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
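Stripped of the mangled names, the snippet above is a recursive bubble sort that stops as soon as a full pass makes no swaps; the readable equivalent:

def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort with an early exit when a pass swaps nothing."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)

assert bubble_sort([3, 1, 2]) == [1, 2, 3]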
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(R"digital_image_processing/image_data/lena_small.jpg")
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = cn.convert_to_negative(UpperCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase ( ):
"""simple docstring"""
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCamelCase__ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A__ = canny.canny(UpperCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase ( ):
"""simple docstring"""
assert gg.gaussian_filter(UpperCamelCase__ , 5 , sigma=0.9 ).all()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
A__ = conv.img_convolve(UpperCamelCase__ , UpperCamelCase__ ).astype(UpperCamelCase__ )
assert res.any()
def UpperCAmelCase ( ):
"""simple docstring"""
assert med.median_filter(UpperCamelCase__ , 3 ).any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ , A__ = sob.sobel_filter(UpperCamelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = sp.make_sepia(UpperCamelCase__ , 20 )
assert sepia.all()
def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
A__ = bs.Burkes(imread(UpperCamelCase__ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
A__ = rs.NearestNeighbour(imread(UpperCamelCase__ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
A__ = imread(UpperCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A__ = 0
A__ = 0
A__ = image[x_coordinate][y_coordinate]
A__ = lbp.get_neighbors_pixel(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A__ = lbp.local_binary_value(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert lbp_image.any()
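The last test above builds a local-binary-pattern image pixel by pixel. As a self-contained illustration of what such a filter computes (the project's own implementation may order neighbours or handle borders differently):

import numpy as np

def local_binary_value(image: np.ndarray, x: int, y: int) -> int:
    # Classic 8-neighbour LBP: set one bit per neighbour whose value >= the centre pixel.
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    code = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1] and image[nx, ny] >= center:
            code |= 1 << bit
    return code

gray = np.random.randint(0, 256, size=(8, 8), dtype=np.uint8)  # stand-in for the grayscale test image
lbp_image = np.array(
    [[local_binary_value(gray, i, j) for j in range(gray.shape[1])] for i in range(gray.shape[0])]
)
assert lbp_image.any()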
| 154 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = fname.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , UpperCamelCase__ ).groups()[0]
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> List[str]:
A__ = file_names
A__ = image_transform
A__ = label_to_id
def __len__( self ) -> Dict:
return len(self.file_names )
def __getitem__( self ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = self.file_names[idx]
A__ = PIL.Image.open(__UpperCAmelCase )
A__ = raw_image.convert('RGB' )
if self.image_transform is not None:
A__ = self.image_transform(__UpperCAmelCase )
A__ = extract_label(__UpperCAmelCase )
if self.label_to_id is not None:
A__ = self.label_to_id[label]
return {"image": image, "label": label}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if args.with_tracking:
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
A__ = config['image_size']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
A__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A__ = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A__ = os.path.split(UpperCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A__ = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
A__ = [extract_label(UpperCamelCase__ ) for fname in file_names]
A__ = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A__ = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A__ = np.random.permutation(len(UpperCamelCase__ ) )
A__ = int(0.8 * len(UpperCamelCase__ ) )
A__ = random_perm[:cut]
A__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A__ = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A__ = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = create_model('resnet50d' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A__ = False
for param in model.get_classifier().parameters():
A__ = True
# We normalize the batches of images to be a bit faster.
A__ = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
A__ = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A__ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A__ = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
A__ = 0
# We also need to keep track of the starting epoch so files are named properly
A__ = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A__ = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A__ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A__ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A__ = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
A__ = int(training_difference.replace('epoch_' , '' ) ) + 1
A__ = None
else:
A__ = int(training_difference.replace('step_' , '' ) )
A__ = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
A__ = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A__ = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A__ = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ = (batch['image'] - mean) / std
A__ = model(UpperCamelCase__ )
A__ = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
A__ = 0
A__ = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ = (batch['image'] - mean) / std
with torch.no_grad():
A__ = model(UpperCamelCase__ )
A__ = outputs.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['label']) )
A__ = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A__ = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(UpperCamelCase__ ),
'epoch': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
A__ = F'''epoch_{epoch}'''
if args.output_dir is not None:
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=UpperCamelCase__ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=UpperCamelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=UpperCamelCase__ , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
A__ = parser.parse_args()
A__ = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 154 | 1 |
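The last snippet is accelerate's computer-vision fine-tuning example. Minus checkpointing and experiment tracking, its core reduces to the usual prepare/backward pattern:

import torch
from accelerate import Accelerator

def train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, mean, std):
    # mean/std are the per-channel normalization tensors, already on the right device.
    accelerator = Accelerator()
    model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, lr_scheduler
    )
    for _ in range(num_epochs):
        model.train()
        for batch in train_dataloader:
            inputs = (batch["image"] - mean) / std  # normalize on-device, as in the script
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            accelerator.backward(loss)  # replaces loss.backward(); handles mixed precision / DDP
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()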