| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pithon")
    1
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")

    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
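    # Minimal usage sketch (illustrative values, not part of the original file):
    # "karolin" and "kathrin" differ at positions 2, 3 and 4.
    assert hamming_distance("karolin", "kathrin") == 3
    assert hamming_distance("0000", "1111") == 4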
| 41 |
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
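    # A testable variant added for illustration (not in the original script):
    # same greedy rule, but it returns the selected indices instead of printing.
    def max_activities(start: list[int], finish: list[int]) -> list[int]:
        selected = [0]
        i = 0
        for j in range(1, len(finish)):
            if start[j] >= finish[i]:
                selected.append(j)
                i = j
        return selected

    assert max_activities(start, finish) == [0, 1, 3, 4]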
| 316 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check the per-sequence score of google/mt5-small against a reference value."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # `loss` is the mean cross-entropy per label token; multiplying by the
        # label length recovers the summed negative log-likelihood.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 42 |
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
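    # Illustrative check (not in the original file): the 2-bit Gray code is
    # 00, 01, 11, 10, i.e. consecutive values differ in exactly one bit.
    assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
    assert gray_code(2) == [0, 1, 3, 2]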
| 316 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature: stores audio data as a struct of raw bytes and/or a file path."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self) -> Any:
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert PCM bytes to WAV bytes, the sampling rate must be known
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, we don't have to read the file (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes; path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
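# A short usage sketch of this feature through the public `datasets` API
# (illustrative only; the file path is a placeholder):
#
#     from datasets import Audio, Dataset
#
#     ds = Dataset.from_dict({"audio": ["path/to/audio.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#     sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}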
| 43 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
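# A minimal, hedged sketch of how these checkpoints are used outside the test
# suite (the image URL is a placeholder):
#
#     from transformers import pipeline
#
#     classifier = pipeline("image-classification", model="microsoft/beit-base-patch16-224")
#     print(classifier("https://example.com/cat.png")[0])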
| 316 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens; snippets that are too short are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet by splitting on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
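# Quick illustration (not in the original script) of why MinHash works here:
# the fraction of matching permutation minima estimates the Jaccard similarity
# of the two token sets. The snippets below are illustrative placeholders.
def _demo_minhash_estimate() -> None:
    tokens_a = get_tokens("def add(a, b): return a + b")
    tokens_b = get_tokens("def add(x, y): return x + y")
    m_a, m_b = MinHash(num_perm=NUM_PERM), MinHash(num_perm=NUM_PERM)
    for t in tokens_a:
        m_a.update(t.encode())
    for t in tokens_b:
        m_b.update(t.encode())
    true_jaccard = len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
    print(f"estimated={m_a.jaccard(m_b):.2f} true={true_jaccard:.2f}")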
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and attach it to an existing cluster if it is a near-duplicate."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dicts
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element: Tuple) -> Optional[Tuple]:
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    """Find duplicate clusters in the dataset using MinHash LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code_a: str, code_b: str) -> float:
    """Exact Jaccard similarity between the token sets of two code snippets."""
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster: List[Dict], jaccard_threshold: float) -> List[Dict]:
    """Reduce a cluster to its 'extremes': a subset whose members are mutually dissimilar."""
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a)
    return extremes


def find_extremes(cluster_list: List[List[Dict]], dataset: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    partial_shared = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                partial_shared,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset, keeping one 'extreme' per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
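# Hedged end-to-end usage sketch (toy data, illustrative only):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"content": [...], "repo_name": [...], "path": [...]})
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)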
| 44 |
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
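    # Worked example (illustrative): 7**5 = 16807 is itself a 5-digit number,
    # so it is counted by solution(); 10**2 = 100 has 3 digits, so it is not.
    assert len(str(7**5)) == 5
    assert len(str(10**2)) != 2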
| 316 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so the encoded candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
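# A hedged usage sketch of `batch_encode_candidates` (checkpoint name taken
# from the maps above; output tensors have shape [batch_size, num_candidates, max_length]):
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     batch = tokenizer.batch_encode_candidates(
#         [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#     )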
| 45 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
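# The lazy-module pattern above defers the heavy submodule imports until an
# attribute is first accessed. A generic, minimal sketch of the same idea
# (illustrative only; not the actual `_LazyModule` implementation):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#             return getattr(module, attr)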
| 46 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    r"""Image processor that optionally rescales images and pads them to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
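# Worked example for the padding rule above (illustrative): with pad_size=8,
# an image of height 250 gets (250 // 8 + 1) * 8 - 250 = 6 rows of symmetric
# padding. Note the formula always adds at least one extra block even when
# the side is already a multiple of pad_size (e.g. 256 -> 8 extra rows).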
| 316 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak the model's weights to our YOLOS structure."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
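    # Hedged example invocation (script file name and checkpoint path are
    # placeholders, not taken from the original source):
    #
    #     python convert_yolos_to_pytorch.py \
    #         --yolos_name yolos_s_200_pre \
    #         --checkpoint_path /path/to/yolos_s_200_pre.pth \
    #         --pytorch_dump_folder_path ./yolos-small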
| 47 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 316 | 0 |
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at ``items[index]`` if ``items`` were sorted."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    less, equal, greater = _partition(items, pivot)
    count = len(equal)
    m = len(less)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
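
# Illustrative usage sketch (the sample list is made up): quick_select runs in
# expected O(n) time, so it finds the median without fully sorting the list.
if __name__ == "__main__":
    sample = [7, 2, 9, 4, 1, 5, 8]
    print(quick_select(sample, len(sample) // 2))  # 5, the median of the sample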
| 48 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316 | 0 |
def selection_sort(collection: list) -> list:
    """Sort ``collection`` in place using selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 49 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize, converting any nested ``GenerationConfig`` value to a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
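
# Illustrative usage sketch (argument values are made up):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )
#     assert args.to_dict()["generation_max_length"] == 128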
| 316 | 0 |
import socket
def main() -> None:
    """Connect to a local server on port 12312 and save the received bytes to disk."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
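
# For context, a minimal counterpart server sketch (hypothetical; not part of
# the original script). It accepts one connection on the same port and streams
# a file back; the filename 'File_to_send' is an assumption for illustration.
def serve_file() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open("File_to_send", "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()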
| 50 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
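
# Quick usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap(initial_block_size=8)
    hm["one"] = 1
    hm["two"] = 2
    hm["one"] = 11  # re-setting a key overwrites in place
    assert hm["one"] == 11
    del hm["two"]
    assert len(hm) == 1
    print(hm)  # HashMap(one: 11)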
| 316 | 0 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that emits a warning whenever the wrapped function is called."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
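
# Usage sketch (illustrative): any call to the wrapped function first emits
# the warning, then runs the original body.
@experimental
def multiply_by_two(x: int) -> int:
    return x * 2

# multiply_by_two(3) warns "'multiply_by_two' is experimental ..." and returns 6.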
| 51 |
"""simple docstring"""
def different_signs(num_a: int, num_b: int) -> bool:
    """Return True iff ``num_a`` and ``num_b`` have opposite signs."""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
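
# Why the XOR trick works (illustrative checks): in two's complement the sign
# is the top bit, so num_a ^ num_b is negative exactly when the signs differ.
assert different_signs(1, -1) is True
assert different_signs(1, 1) is False
assert different_signs(-2, -7) is False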
| 316 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 52 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator=None,
        output_type: str = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
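
# Minimal usage sketch (illustrative; the checkpoint id is an example of a
# public score-SDE model and may differ from what you intend to load):
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")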
| 316 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def _lowerCamelCase ( self : int ):
pass # TODO add if relevant
def _lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def _lowerCamelCase ( self : Optional[int] ):
pass # TODO add if relevant
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.get_tokenizer()
# Testing tokenization
__UpperCamelCase = 'こんにちは、世界。 こんばんは、㔺界。'
__UpperCamelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
__UpperCamelCase = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
__UpperCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCamelCase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
__UpperCamelCase = tokens + [tokenizer.unk_token]
__UpperCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
__UpperCamelCase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.get_tokenizer()
# Testing tokenization
__UpperCamelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
__UpperCamelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。'
__UpperCamelCase = tokenizer.encode(__A )
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__UpperCamelCase = 'こんにちは、世界。'
__UpperCamelCase = 'こんばんは、㔺界。😀'
__UpperCamelCase = 'こんにちは、世界。こんばんは、世界。😀'
__UpperCamelCase = tokenizer.encode(prefix_text + input_text )
__UpperCamelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text )
__UpperCamelCase = tokenizer.encode(__A , prefix_text=__A )
__UpperCamelCase = tokenizer.decode(__A )
__UpperCamelCase = tokenizer.decode(__A )
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__UpperCamelCase = 'こんにちは、世界。'
__UpperCamelCase = 'こんばんは、㔺界。😀'
__UpperCamelCase = len(tokenizer.encode(__A ) ) - 2
__UpperCamelCase = len(tokenizer.encode(__A ) ) - 2
__UpperCamelCase = [1] + [0] * (len_prefix + len_text + 1)
__UpperCamelCase = [1] * (len_prefix + len_text + 1) + [0]
__UpperCamelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCamelCase = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCamelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCamelCase = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__UpperCamelCase = tokenizer.encode('あンいワ' )
__UpperCamelCase = tokenizer.encode('' , prefix_text='あンいワ' )
__UpperCamelCase = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__UpperCamelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
__UpperCamelCase = tokenizer(__A , padding=__A )
__UpperCamelCase = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
__UpperCamelCase = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
__UpperCamelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCamelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def _lowerCamelCase ( self : str ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _lowerCamelCase ( self : str ):
# tokenizer has no padding token
pass
| 53 |
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Dynamic-programming subset-sum: can some subset of ``arr`` sum to ``required_sum``?"""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
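
# Illustrative example (made-up numbers): 9 = 4 + 5 is reachable, 30 is not.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False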
| 316 | 0 |
"""simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID):
    """Current conditions for a city."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID):
    """Forecast data for a city."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID):
    """One Call weather data for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location: ").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
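
# Illustrative call (requires a valid APPID; the city and the printed fields
# follow the OpenWeatherMap response schema):
#
#     data = current_weather(q="London")
#     print(data["weather"][0]["description"], data["main"]["temp"])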
| 54 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
| 316 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
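
# Illustrative usage sketch: instantiating the config and reading fields back.
#
#     config = MgpstrConfig(max_token_length=27)
#     assert config.model_type == "mgp-str"
#     assert config.num_character_labels == 38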
| 55 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
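# Usage sketch (commented out because `from_pretrained` downloads the public
# "gpt2" checkpoint from the Hub; round-tripping plain text through encode and
# decode returns it unchanged for GPT-2's byte-level BPE):
#
#     tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
#     ids = tokenizer('Hello world')['input_ids']
#     assert tokenizer.decode(ids) == 'Hello world'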
| 316 | 0 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
        model = AutoModelForCausalLM.from_pretrained('distilgpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors='pt')
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ''
            for new_text in streamer:
                streamer_text += new_text
| 56 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
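# Example invocation (the script filename and paths below are illustrative,
# not part of the original script):
#
#     python convert_vae_decoder_to_onnx.py \
#         --model_path ./stable-diffusion-v1-5 \
#         --output_path ./sd-onnx \
#         --opset 14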
| 316 | 0 |
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    '''simple docstring'''
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
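    # Quick demonstration of `fizz_buzz`; the expected string follows directly
    # from the rules implemented above.
    assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "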
| 57 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    decoded_text: str
    data: str
    common_word: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
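if __name__ == "__main__":
    # Round-trip sanity check for `try_key`; the toy key "god" below is purely
    # illustrative and is not the puzzle's actual key.
    _toy_key = tuple(ord(c) for c in "god")
    _toy_cipher = [ord(c) ^ k for c, k in zip("the quick brown fox", cycle(_toy_key))]
    assert try_key(_toy_cipher, _toy_key) == "the quick brown fox"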
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
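    # Example conversions; the expected values follow directly from the charts
    # above (100 km/h * 0.27777778 = 27.778 m/s, etc.).
    assert convert_speed(100, "km/h", "m/s") == 27.778
    assert convert_speed(100, "km/h", "mph") == 62.137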
| 316 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(tree: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)
    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound )
        )
    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
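    # Small illustrative check using the dataclass above: 2.0 with children
    # (1.0, 3.0) is a valid BST; swapping the children breaks the ordering.
    valid_tree = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    swapped_tree = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid_tree)
    assert not is_binary_search_tree(swapped_tree)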
| 59 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
'''simple docstring'''
return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components(self):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16(self):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1)
    def test_attention_slicing_forward_pass(self):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local(self):
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316 | 0 |
"""simple docstring"""
import numpy as np
import datasets
snake_case__ : Dict = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
snake_case__ : Union[str, Any] = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
snake_case__ : Any = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('Expected `X` to be a 2D vector')
        if len(reference_distribution.shape) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension')
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
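# Hand recomputation of the docstring example with plain numpy, mirroring the
# arithmetic in `_compute` above (`pinv` kicks in because this toy covariance
# matrix is singular):
if __name__ == "__main__":
    _ref = np.array([[0, 1], [1, 0]])
    _x = np.array([[0, 1]]) - np.mean(_ref)
    _inv_cov = np.linalg.pinv(np.cov(_ref.T))
    print(np.dot(np.dot(_x, _inv_cov), _x.T).diagonal())  # -> [0.5]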
| 60 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
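    # The first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, hence:
    assert ugly_numbers(10) == 12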
| 316 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 61 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process(*args, **kwargs)
    def post_process_object_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args, **kwargs)
    def post_process_image_guided_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 316 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)
    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
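    # Illustrative strictly diagonally dominant 3x3 system (values invented for
    # this sketch); the Jacobi iteration converges toward the exact solution.
    demo_coeffs = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 1.0, 3.0]])
    demo_consts = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(demo_coeffs, demo_consts, [0, 0, 0], 25))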
| 62 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=5_02_57,
        n_positions=10_24,
        n_embd=7_68,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function='gelu_pytorch_tanh',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_02_56,
        eos_token_id=5_02_56,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
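# Usage sketch (illustrative, deliberately tiny values; run where the
# `transformers` package is installed, since this file uses relative imports):
#
#     from transformers import GPTBigCodeConfig
#     config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
#     assert config.hidden_size == 128  # aliased to `n_embd` via `attribute_map`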
| 63 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider the rest of the activities
    for j in range(n):
        # If this activity has a start time greater than or equal to the finish
        # time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
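    # With these inputs the greedy selection prints: 0,1,3,4,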
| 316 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8_000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, """hann""" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="""log10""", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0 ):
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"""input_features""": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["""input_features"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""], attention_mask=padded_inputs["""attention_mask"""], padding_value=self.padding_value, )
            padded_inputs["""input_features"""] = np.stack(padded_inputs["""input_features"""], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" ).transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["""input_features"""] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["""input_features"""] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["""attention_mask"""] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 64 |
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('The given input must be positive')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = '0' + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = '1' + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
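# Quick sanity check of the two functions above; the reflect-and-prefix construction
# yields the standard reflected Gray code:
print(gray_code_sequence_string(2))  # ['00', '01', '11', '10']
print(gray_code(3))                  # [0, 1, 3, 2, 6, 7, 5, 4]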
| 316 | 0 |
import math


def perfect_square(num: int) -> bool:
    """Check whether `num` is a perfect square via math.sqrt (floating point, so it
    can misbehave for very large inputs)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether `n` is a perfect square using binary search over its root."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
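# Minimal check of both predicates (1_000_000 = 1000**2, so it is a perfect square):
for value in (16, 17, 1_000_000):
    print(value, perfect_square(value), perfect_square_binary_search(value))
# 16 True True / 17 False False / 1000000 True True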
| 65 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
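# The sequence-length bookkeeping in BeitModelTester is simple arithmetic; with the
# defaults above:
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
print(num_patches, num_patches + 1)  # 225 patches, sequence length 226 with the [CLS] token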
| 316 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self: List[Any] , snake_case: str = "▁" , snake_case: bool = True , snake_case: Union[str, AddedToken] = "<unk>" , snake_case: Union[str, AddedToken] = "</s>" , snake_case: Union[str, AddedToken] = "<pad>" , ) -> Any:
snake_case_ :Any = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
snake_case_ :Dict = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case_ :Tuple = token_dict["""token"""]
snake_case_ :Union[str, Any] = Tokenizer(Unigram() )
snake_case_ :Tuple = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
snake_case_ :str = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=snake_case , add_prefix_space=snake_case ),
pre_tokenizers.Digits(individual_digits=snake_case ),
pre_tokenizers.Punctuation(),
] )
snake_case_ :Dict = decoders.Metaspace(replacement=snake_case , add_prefix_space=snake_case )
snake_case_ :str = TemplateProcessing(
single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
snake_case_ :Tuple = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(snake_case , snake_case )
def lowerCAmelCase_ ( self: Dict , snake_case: Union[str, List[str]] , snake_case: int = 8_000 , snake_case: bool = True , ) -> int:
snake_case_ :List[Any] = trainers.UnigramTrainer(
vocab_size=snake_case , special_tokens=self.special_tokens_list , show_progress=snake_case , )
if isinstance(snake_case , snake_case ):
snake_case_ :int = [files]
self._tokenizer.train(snake_case , trainer=snake_case )
self.add_unk_id()
def lowerCAmelCase_ ( self: Dict , snake_case: Union[Iterator[str], Iterator[Iterator[str]]] , snake_case: int = 8_000 , snake_case: bool = True , ) -> List[str]:
snake_case_ :Optional[Any] = trainers.UnigramTrainer(
vocab_size=snake_case , special_tokens=self.special_tokens_list , show_progress=snake_case , )
self._tokenizer.train_from_iterator(snake_case , trainer=snake_case )
self.add_unk_id()
def lowerCAmelCase_ ( self: List[Any] ) -> Tuple:
snake_case_ :Dict = json.loads(self._tokenizer.to_str() )
snake_case_ :Optional[int] = self.special_tokens["""unk"""]["""id"""]
snake_case_ :Optional[int] = Tokenizer.from_str(json.dumps(snake_case ) )
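# Minimal usage sketch (the in-memory corpus and vocabulary size below are made up;
# a real run would train on a much larger corpus, or on files via `train`):
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    corpus = ["hello world", "hello tokenizers", "world of tokenizers"]
    tokenizer.train_from_iterator(iter(corpus), vocab_size=60, show_progress=False)
    print(tokenizer.encode("hello world").tokens)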
| 66 |
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
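# The counting condition is easy to verify by hand: 9**4 = 6561 has exactly four
# digits, while 10**n always has n + 1 digits, so no base of 10 or more can qualify
# (which is why the defaults stop at base 9). For the defaults, solution() is 49.
assert len(str(9**4)) == 4
assert len(str(10**4)) == 5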
| 316 | 0 |
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : subreddit to query
    limit : number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : fetch only the fields listed here
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 67 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 0 |
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 68 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
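# The pad computation above always rounds up to the *next* multiple of `size`, even
# when a side is already divisible (it then adds one full extra block):
#     old = 13, size = 8 -> (13 // 8 + 1) * 8 - 13 = 3  (padded to 16)
#     old = 16, size = 8 -> (16 // 8 + 1) * 8 - 16 = 8  (padded to 24)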
| 316 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
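# The multiple-choice test duplicates every example across the choice axis; the tile
# pattern is easy to verify in isolation:
#     x = tf.constant([[1, 2, 3]])              # shape (batch=1, seq=3)
#     tf.tile(tf.expand_dims(x, 1), (1, 4, 1))  # shape (1, num_choices=4, 3)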
| 316 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet into its set of alphanumeric tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset using MinHash LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate the dataset, keeping one 'extreme' representative per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
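# A tiny check of the token-set similarity used above (worked out by hand: the two
# snippets share {def, add, return} out of seven distinct tokens overall):
if __name__ == "__main__":
    snippet_a = "def add(x, y): return x + y"
    snippet_b = "def add(a, b): return a + b"
    print(sorted(get_tokens(snippet_a)))             # ['add', 'def', 'return', 'x', 'y']
    print(jaccard_similarity(snippet_a, snippet_b))  # 3 / 7, approx. 0.4286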
| 70 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
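# The key-renaming helper replaces only the last occurrence of the suffix, e.g.:
#     rreplace("blocks.0.w.w", ".w", ".weight", 1) -> "blocks.0.w.weight"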
| 316 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( a , a , a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Tuple =StableDiffusionInstructPixaPixPipeline
UpperCamelCase__ : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : str =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Any =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase : Optional[int] =PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
__UpperCamelCase : List[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCamelCase : Tuple =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : Any =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : Tuple ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Tuple =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Tuple =Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Dict =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : str =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : List[Any] ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Tuple =self.get_dummy_components()
__UpperCamelCase : List[str] =StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
__UpperCamelCase : List[Any] =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : List[str] =sd_pipe(**lowerCamelCase__ ).images
__UpperCamelCase : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase : Dict =np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : int =self.get_dummy_components()
__UpperCamelCase : str =StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
__UpperCamelCase : str =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] ='french fries'
__UpperCamelCase : Union[str, Any] =sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
__UpperCamelCase : int =output.images
__UpperCamelCase : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase : Tuple =np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : List[Any] =self.get_dummy_components()
__UpperCamelCase : List[str] =StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : int =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : List[str] =[inputs['prompt']] * 2
__UpperCamelCase : List[str] =np.array(inputs['image'] ).astype(np.floataa ) / 255.0
__UpperCamelCase : Any =torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ )
__UpperCamelCase : int =image / 2 + 0.5
__UpperCamelCase : Dict =image.permute(0 , 3 , 1 , 2 )
__UpperCamelCase : Dict =image.repeat(2 , 1 , 1 , 1 )
__UpperCamelCase : str =sd_pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Optional[Any] =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__UpperCamelCase : List[Any] =np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
        device ='cpu'  # ensure determinism for the device-dependent torch.Generator
        components =self.get_dummy_components()
        components['scheduler'] =EulerAncestralDiscreteScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
        sd_pipe =StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe =sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs =self.get_dummy_inputs(device )
        image =sd_pipe(**inputs ).images
        image_slice =image[0, -3:, -3:, -1]
        slice =[round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(x ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        expected_slice =np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.get_dummy_components()
__UpperCamelCase : List[str] =StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type='pt' ) )[0]
__UpperCamelCase : Union[str, Any] =components['vae']
__UpperCamelCase : int =self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__UpperCamelCase : List[str] =vae.encode(inputs[image_param] ).latent_dist.mode()
__UpperCamelCase : Union[str, Any] =pipe(**lowerCamelCase__ )[0]
__UpperCamelCase : Optional[int] =np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase__ , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : List[Any] =torch.manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple =load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__UpperCamelCase : str ={
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : int =self.get_inputs()
__UpperCamelCase : Optional[Any] =pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Optional[int] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : str =np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCamelCase__ )
__UpperCamelCase : Tuple =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : List[Any] =self.get_inputs()
__UpperCamelCase : Optional[int] =pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Dict =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : Union[str, Any] =np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCamelCase__ )
__UpperCamelCase : List[Any] =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Tuple =self.get_inputs()
__UpperCamelCase : List[str] =pipe(**lowerCamelCase__ ).images
__UpperCamelCase : int =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : str =np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =0
def callback_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
__UpperCamelCase : List[Any] =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__UpperCamelCase : Union[str, Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__UpperCamelCase : List[Any] =latents[0, -3:, -3:, -1]
__UpperCamelCase : Tuple =np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__UpperCamelCase : str =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__UpperCamelCase : Dict =latents[0, -3:, -3:, -1]
__UpperCamelCase : Any =np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__UpperCamelCase : Dict =False
__UpperCamelCase : Any =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
__UpperCamelCase : Tuple =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Any =self.get_inputs()
pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase : List[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
__UpperCamelCase : Dict =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : str =self.get_inputs()
__UpperCamelCase : Optional[int] =pipe(**lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCamelCase : List[Any] =inputs['image'].resize((504, 504) )
__UpperCamelCase : Tuple ='timbrooks/instruct-pix2pix'
__UpperCamelCase : List[str] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Optional[int] =pipe(**lowerCamelCase__ )
__UpperCamelCase : Tuple =output.images[0]
__UpperCamelCase : Optional[Any] =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__UpperCamelCase : Dict =np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
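# Usage sketch for the pipeline exercised above (hedged: mirrors the public
# diffusers API and reuses the checkpoint/kwargs already present in this file):
#
#   import torch
#   from diffusers import StableDiffusionInstructPix2PixPipeline
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
#   ).to("cuda")
#   edited = pipe(
#       prompt="turn him into a cyborg",
#       image=init_image,               # any RGB PIL image, e.g. example.jpg above
#       num_inference_steps=10,
#       image_guidance_scale=1.0,
#   ).images[0]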
| 71 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
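# Usage sketch (hedged: upstream this dataclass is Seq2SeqTrainingArguments; that
# name is an assumption, the field names below are from the declarations above):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,    # evaluate with generate() (ROUGE, BLEU)
#       generation_max_length=128,     # overrides the model config's max_length
#       generation_num_beams=4,        # overrides the model config's num_beams
#   )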
| 316 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        """simple docstring"""
        return None


class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class OnnxExportTestCase( unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCAmelCase , '''tf''' , 1_2 , **__lowerCAmelCase )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCAmelCase , '''pt''' , 1_2 , **__lowerCAmelCase )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
from transformers import BertModel
_lowerCamelCase : Optional[int] = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(__lowerCAmelCase ) )
vocab_file.flush()
_lowerCamelCase : str = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__lowerCAmelCase ) ) )
model.save_pretrained(__lowerCAmelCase )
self._test_export(__lowerCAmelCase , '''pt''' , 1_2 , __lowerCAmelCase )
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase : Dict = self._test_export(__lowerCAmelCase , '''tf''' , 1_2 , **__lowerCAmelCase )
_lowerCamelCase : str = quantize(Path(__lowerCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCAmelCase ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase : Dict = self._test_export(__lowerCAmelCase , '''pt''' , 1_2 , **__lowerCAmelCase )
_lowerCamelCase : str = quantize(__lowerCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCAmelCase ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=None , **__lowerCAmelCase : int ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowerCamelCase : List[Any] = Path(__lowerCAmelCase ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
return path
except Exception as e:
self.fail(__lowerCAmelCase )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
from transformers import BertModel
_lowerCamelCase : Any = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase : Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__lowerCAmelCase , __lowerCAmelCase , '''pt''' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
from transformers import TFBertModel
_lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase : Optional[int] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__lowerCAmelCase , __lowerCAmelCase , '''tf''' )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = FeatureExtractionPipeline(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = infer_shapes(__lowerCAmelCase , __lowerCAmelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCAmelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
_lowerCamelCase : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
_lowerCamelCase , _lowerCamelCase : str = ensure_valid_input(FuncContiguousArgs() , __lowerCAmelCase , __lowerCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCAmelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCAmelCase ) , set(__lowerCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCAmelCase , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_lowerCamelCase , _lowerCamelCase : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , __lowerCAmelCase , __lowerCAmelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
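# Usage sketch (hedged): a typical export-then-quantize flow with the helpers
# exercised above, outside the test harness:
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert, quantize
#
#   convert(framework="pt", model="bert-base-cased",
#           output=Path("onnx/bert-base-cased.onnx"), opset=12)
#   quantized_path = quantize(Path("onnx/bert-base-cased.onnx"))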
| 72 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True , slots=True )
class _Item ( Generic[KEY, VAL] ):
    key: KEY
    val: VAL


class _DeletedItem ( _Item ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__(None , None )

    def __bool__( self ):
        '''simple docstring'''
        return False


_deleted = _DeletedItem()


class HashMap ( MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key ):
        '''simple docstring'''
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind ):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind , key , val ):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ):
        '''simple docstring'''
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ):
        '''simple docstring'''
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size ):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key ):
        '''simple docstring'''
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key , val ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key , val ):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ):
        '''simple docstring'''
        return self._len

    def __iter__( self ):
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ):
        '''simple docstring'''
        val_string = ' ,'.join(
            F'{item.key}: {item.val}' for item in self._buckets if item )
        return F'HashMap({val_string})'
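# Minimal usage sketch for the open-addressing map above (linear probing, with
# tombstone deletion via the `_deleted` sentinel):
#
#   hm: HashMap[str, int] = HashMap()
#   hm["a"] = 1
#   hm["b"] = 2
#   del hm["a"]          # the slot becomes a tombstone; probing still finds "b"
#   assert "a" not in hm and hm["b"] == 2 and len(hm) == 1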
| 316 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
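# Preferred import path going forward, per the deprecation message above (sketch):
#
#   from diffusers import StableDiffusionControlNetPipeline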
| 73 |
"""simple docstring"""
def A ( numa :int , numb :int ) -> bool:
    return numa ^ numb < 0
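# Quick sanity checks (sketch): the XOR of two ints is negative exactly when
# their sign bits differ, so `A` detects opposite signs without branching.
assert A(5, -3) is True
assert A(4, 17) is False
assert A(-2, -8) is False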
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowercase = logging.get_logger(__name__)
# General docstring
_lowercase = '''ResNetConfig'''
# Base docstring
_lowercase = '''microsoft/resnet-50'''
_lowercase = [1, 20_48, 7, 7]
# Image classification docstring
_lowercase = '''microsoft/resnet-50'''
_lowercase = '''tiger cat'''
_lowercase = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : int ,A_ : int ,A_ : int = 3 ,A_ : int = 1 ,A_ : str = "relu" ) -> Tuple:
super().__init__()
A = nn.Convad(
A_ ,A_ ,kernel_size=A_ ,stride=A_ ,padding=kernel_size // 2 ,bias=A_ )
A = nn.BatchNormad(A_ )
A = ACTaFN[activation] if activation is not None else nn.Identity()
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tensor ) -> Tensor:
A = self.convolution(A_ )
A = self.normalization(A_ )
A = self.activation(A_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : ResNetConfig ) -> Union[str, Any]:
super().__init__()
A = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
A = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 )
A = config.num_channels
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tensor ) -> Tensor:
A = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
A = self.embedder(A_ )
A = self.pooler(A_ )
return embedding
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : int ,A_ : int ,A_ : int = 2 ) -> Optional[int]:
super().__init__()
A = nn.Convad(A_ ,A_ ,kernel_size=1 ,stride=A_ ,bias=A_ )
A = nn.BatchNormad(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tensor ) -> Tensor:
A = self.convolution(A_ )
A = self.normalization(A_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,A_ : int ,A_ : int ,A_ : int = 1 ,A_ : str = "relu" ) -> Dict:
super().__init__()
A = in_channels != out_channels or stride != 1
A = (
ResNetShortCut(A_ ,A_ ,stride=A_ ) if should_apply_shortcut else nn.Identity()
)
A = nn.Sequential(
ResNetConvLayer(A_ ,A_ ,stride=A_ ) ,ResNetConvLayer(A_ ,A_ ,activation=A_ ) ,)
A = ACTaFN[activation]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ) -> Optional[Any]:
A = hidden_state
A = self.layer(A_ )
A = self.shortcut(A_ )
hidden_state += residual
A = self.activation(A_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : int ,A_ : int ,A_ : int = 1 ,A_ : str = "relu" ,A_ : int = 4 ) -> Dict:
super().__init__()
A = in_channels != out_channels or stride != 1
A = out_channels // reduction
A = (
ResNetShortCut(A_ ,A_ ,stride=A_ ) if should_apply_shortcut else nn.Identity()
)
A = nn.Sequential(
ResNetConvLayer(A_ ,A_ ,kernel_size=1 ) ,ResNetConvLayer(A_ ,A_ ,stride=A_ ) ,ResNetConvLayer(A_ ,A_ ,kernel_size=1 ,activation=A_ ) ,)
A = ACTaFN[activation]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[Any]:
A = hidden_state
A = self.layer(A_ )
A = self.shortcut(A_ )
hidden_state += residual
A = self.activation(A_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : ResNetConfig ,A_ : int ,A_ : int ,A_ : int = 2 ,A_ : int = 2 ,) -> Tuple:
super().__init__()
A = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
A = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(A_ ,A_ ,stride=A_ ,activation=config.hidden_act ) ,*[layer(A_ ,A_ ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tensor ) -> Tensor:
A = input
for layer in self.layers:
A = layer(A_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : ResNetConfig ) -> Dict:
super().__init__()
A = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
A_ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
A = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(A_ ,config.depths[1:] ):
self.stages.append(ResNetStage(A_ ,A_ ,A_ ,depth=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Tensor ,A_ : bool = False ,A_ : bool = True ) -> BaseModelOutputWithNoAttention:
A = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A = hidden_states + (hidden_state,)
A = stage_module(A_ )
if output_hidden_states:
A = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=A_ ,hidden_states=A_ ,)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ResNetConfig
_lowerCamelCase: List[Any] = '''resnet'''
_lowerCamelCase: int = '''pixel_values'''
_lowerCamelCase: int = True
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ) -> Any:
if isinstance(A_ ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' )
elif isinstance(A_ ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Tuple=False ) -> str:
if isinstance(A_ ,A_ ):
A = value
_lowercase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowercase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowercase , )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Dict ) -> Dict:
super().__init__(A_ )
A = config
A = ResNetEmbeddings(A_ )
A = ResNetEncoder(A_ )
A = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A_ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tensor ,A_ : Optional[bool] = None ,A_ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.embedder(A_ )
A = self.encoder(
A_ ,output_hidden_states=A_ ,return_dict=A_ )
A = encoder_outputs[0]
A = self.pooler(A_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ ,pooler_output=A_ ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowercase , )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Any ) -> List[Any]:
super().__init__(A_ )
A = config.num_labels
A = ResNetModel(A_ )
# classification head
A = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A_ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[torch.FloatTensor] = None ,A_ : Optional[torch.LongTensor] = None ,A_ : Optional[bool] = None ,A_ : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.resnet(A_ ,output_hidden_states=A_ ,return_dict=A_ )
A = outputs.pooler_output if return_dict else outputs[1]
A = self.classifier(A_ )
A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A = 'single_label_classification'
else:
A = 'multi_label_classification'
if self.config.problem_type == "regression":
A = MSELoss()
if self.num_labels == 1:
A = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A = loss_fct(A_ ,A_ )
elif self.config.problem_type == "single_label_classification":
A = CrossEntropyLoss()
A = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A = BCEWithLogitsLoss()
A = loss_fct(A_ ,A_ )
if not return_dict:
A = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A_ ,logits=A_ ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowercase , )
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
def __init__( self : int ,A_ : Union[str, Any] ) -> Optional[int]:
super().__init__(A_ )
super()._init_backbone(A_ )
A = [config.embedding_size] + config.hidden_sizes
A = ResNetEmbeddings(A_ )
A = ResNetEncoder(A_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@replace_return_docstrings(output_type=A_ ,config_class=_CONFIG_FOR_DOC )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tensor ,A_ : Optional[bool] = None ,A_ : Optional[bool] = None ) -> BackboneOutput:
A = return_dict if return_dict is not None else self.config.use_return_dict
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = self.embedder(A_ )
A = self.encoder(A_ ,output_hidden_states=A_ ,return_dict=A_ )
A = outputs.hidden_states
A = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=A_ ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=A_ ,)
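# Usage sketch (hedged: standard transformers inference API, not part of this file):
#
#   import torch
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=pil_image, return_tensors="pt")
#   with torch.no_grad():
#       predicted = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted])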
| 74 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline ( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet: UNetaDModel , scheduler: ScoreSdeVeScheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
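# Usage sketch (hedged: the checkpoint name is an assumption taken from the
# diffusers model hub):
#
#   from diffusers import ScoreSdeVePipeline
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]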
| 316 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __UpperCamelCase :
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent =parent
        self.batch_size =13
        self.seq_length =7
        self.is_training =True
        self.use_input_mask =True
        self.use_token_type_ids =True
        self.use_labels =True
        self.vocab_size =99
        self.hidden_size =384
        self.num_hidden_layers =2
        self.num_attention_heads =4
        self.intermediate_size =37
        self.hidden_act ='gelu'
        self.hidden_dropout_prob =0.1
        self.attention_probs_dropout_prob =0.1
        self.max_position_embeddings =512
        self.type_vocab_size =16
        self.type_sequence_label_size =2
        self.initializer_range =0.0_2
        self.num_labels =3
        self.num_choices =4
        self.embedding_size =128
        self.head_ratio =2
        self.conv_kernel_size =9
        self.num_groups =1
        self.scope =None

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask =None
        if self.use_input_mask:
            input_mask =random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels =None
        token_labels =None
        choice_labels =None
        if self.use_labels:
            sequence_labels =ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels =ids_tensor([self.batch_size], self.num_choices )
        config =ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =TFConvBertModel(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =[input_ids, input_mask]
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =TFConvBertForMaskedLM(config=lowerCAmelCase )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =TFConvBertForSequenceClassification(config=lowerCAmelCase )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =TFConvBertForMultipleChoice(config=lowerCAmelCase )
lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) )
lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) )
lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) )
lowerCamelCase_ ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =TFConvBertForTokenClassification(config=lowerCAmelCase )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =TFConvBertForQuestionAnswering(config=lowerCAmelCase )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =config_and_inputs
        inputs_dict ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase : List[str] =(
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase : int =False
lowercase : List[Any] =False
lowercase : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFConvBertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ =True
lowerCamelCase_ =True
if hasattr(lowerCAmelCase, '''use_cache''' ):
lowerCamelCase_ =True
lowerCamelCase_ =getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length )
lowerCamelCase_ =getattr(self.model_tester, '''key_length''', lowerCAmelCase )
for model_class in self.all_model_classes:
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =len(model(lowerCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase, saved_model=lowerCAmelCase )
lowerCamelCase_ =os.path.join(lowerCAmelCase, '''saved_model''', '''1''' )
lowerCamelCase_ =tf.keras.models.load_model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
if self.is_encoder_decoder:
lowerCamelCase_ =outputs['''encoder_hidden_states''']
lowerCamelCase_ =outputs['''encoder_attentions''']
else:
lowerCamelCase_ =outputs['''hidden_states''']
lowerCamelCase_ =outputs['''attentions''']
self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase )
lowerCamelCase_ =getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ), [self.model_tester.seq_length, self.model_tester.hidden_size], )
self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ =True
lowerCamelCase_ =getattr(self.model_tester, '''decoder_seq_length''', self.model_tester.seq_length )
lowerCamelCase_ =getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length )
lowerCamelCase_ =getattr(self.model_tester, '''key_length''', lowerCAmelCase )
lowerCamelCase_ =getattr(self.model_tester, '''key_length''', lowerCAmelCase )
def check_decoder_attentions_output(lowerCAmelCase ):
lowerCamelCase_ =len(lowerCAmelCase )
self.assertEqual(out_len % 2, 0 )
lowerCamelCase_ =outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )
def check_encoder_attentions_output(lowerCAmelCase ):
lowerCamelCase_ =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
for model_class in self.all_model_classes:
lowerCamelCase_ =True
lowerCamelCase_ =False
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
lowerCamelCase_ =len(lowerCAmelCase )
self.assertEqual(config.output_hidden_states, lowerCAmelCase )
check_encoder_attentions_output(lowerCAmelCase )
if self.is_encoder_decoder:
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(config.output_hidden_states, lowerCAmelCase )
check_decoder_attentions_output(lowerCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase_ =True
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(config.output_hidden_states, lowerCAmelCase )
check_encoder_attentions_output(lowerCAmelCase )
# Check attention is always last and order is fine
lowerCamelCase_ =True
lowerCamelCase_ =True
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(lowerCAmelCase ) )
self.assertEqual(model.config.output_hidden_states, lowerCAmelCase )
check_encoder_attentions_output(lowerCAmelCase )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowerCamelCase_ =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =[1, 6, 768]
self.assertEqual(output.shape, lowerCAmelCase )
lowerCamelCase_ =tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3], lowerCAmelCase, atol=1e-4 )
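# Note (hedged): the `num_attention_heads / 2` expectations in the attention-shape
# assertions above reflect ConvBERT's default head_ratio of 2: half of the
# self-attention heads are replaced by span-based dynamic convolution, so the
# returned attention maps carry num_attention_heads // head_ratio heads.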
| 75 |
"""simple docstring"""
def A ( arr :list[int] , required_sum :int ) -> bool:
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
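# Quick check (sketch): 9 = 4 + 5 is reachable from the list below, 30 is not.
assert A([3, 34, 4, 12, 5, 2], 9) is True
assert A([3, 34, 4, 12, 5, 2], 30) is False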
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance( emb_a , emb_b , eps=1E-12):
    # despite the name, this returns the cosine *similarity* matrix between rows
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1) , a_min=eps)).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1) , a_min=eps)).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T)
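# Sanity check (sketch): identical unit vectors score ~1.0, orthogonal ones ~0.0.
#
#   a = jnp.array([[1.0, 0.0]])
#   b = jnp.array([[0.0, 1.0]])
#   jax_cosine_distance(a, a)   # ~[[1.0]]
#   jax_cosine_distance(a, b)   # ~[[0.0]]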
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =jnp.floataa
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = FlaxCLIPVisionModule(self.config.vision_config )
SCREAMING_SNAKE_CASE : Any = nn.Dense(self.config.projection_dim , use_bias=a , dtype=self.dtype )
SCREAMING_SNAKE_CASE : str = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
SCREAMING_SNAKE_CASE : Optional[int] = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
SCREAMING_SNAKE_CASE : List[Any] = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
SCREAMING_SNAKE_CASE : Any = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self : Optional[Any] , a : List[str] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_model(a )[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.visual_projection(a )
SCREAMING_SNAKE_CASE : List[Any] = jax_cosine_distance(a , self.special_care_embeds )
SCREAMING_SNAKE_CASE : str = jax_cosine_distance(a , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE : Optional[int] = 0.0
SCREAMING_SNAKE_CASE : List[str] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE : str = jnp.round(a , 3 )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=a )
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE : Union[str, Any] = is_special_care * 0.01
SCREAMING_SNAKE_CASE : Union[str, Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE : Dict = jnp.round(a , 3 )
SCREAMING_SNAKE_CASE : int = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =CLIPConfig
lowerCamelCase__ ='clip_input'
lowerCamelCase__ =FlaxStableDiffusionSafetyCheckerModule
def __init__( self : str , a : CLIPConfig , a : Optional[Tuple] = None , a : int = 0 , a : jnp.dtype = jnp.floataa , a : bool = True , **a : str , ) -> int:
"""simple docstring"""
if input_shape is None:
SCREAMING_SNAKE_CASE : List[Any] = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE : Optional[int] = self.module_class(config=a , dtype=a , **a )
super().__init__(a , a , input_shape=a , seed=a , dtype=a , _do_init=_do_init )
def __UpperCamelCase ( self : str , a : jax.random.KeyArray , a : Tuple , a : FrozenDict = None ) -> FrozenDict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.normal(a , a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = jax.random.split(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
SCREAMING_SNAKE_CASE : int = self.module.init(a , a )["params"]
return random_params
def __call__( self : List[Any] , a : Optional[int] , a : dict = None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = jnp.transpose(a , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(a , dtype=jnp.floataa ) , rngs={} , )
| 76 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ):
        '''simple docstring'''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({'num_labels': num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config = config
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), F'model config doesn\'t have a `{p}` attribute'
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool('.ckpt' in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        '''simple docstring'''
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        '''simple docstring'''
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler

    def configure_optimizers(self):
        '''simple docstring'''
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        '''simple docstring'''
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        '''simple docstring'''
        return self.validation_end(outputs)

    def total_steps(self):
        '''simple docstring'''
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        '''simple docstring'''
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        '''simple docstring'''
        raise NotImplementedError('You must implement this for your task')

    def train_dataloader(self):
        '''simple docstring'''
        return self.train_loader

    def val_dataloader(self):
        '''simple docstring'''
        return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        '''simple docstring'''
        return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        '''simple docstring'''
        return os.path.join(
            self.hparams.data_dir, 'cached_{}_{}_{}'.format(
                mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length), ), )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        '''simple docstring'''
        save_path = self.output_dir.joinpath('best_tfmr')
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        '''simple docstring'''
        parser.add_argument(
            '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
        parser.add_argument(
            '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
        parser.add_argument(
            '--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name', )
        parser.add_argument(
            '--cache_dir', default=str(Path(root_dir).parent / 'test_run' / 'cache'), type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co', )
        parser.add_argument(
            '--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--dropout', type=float, help='Dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config', )
        parser.add_argument('--learning_rate', default=5e-5, type=float, help='The initial learning rate for Adam.')
        parser.add_argument(
            '--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler', )
        parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
        parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
        parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
        parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
        parser.add_argument('--num_train_epochs', dest='max_epochs', default=3, type=int)
        parser.add_argument('--train_batch_size', default=32, type=int)
        parser.add_argument('--eval_batch_size', default=32, type=int)
        parser.add_argument('--adafactor', action='store_true')
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        '''simple docstring'''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        '''simple docstring'''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        '''simple docstring'''
        rank_zero_info('***** Validation results *****')
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        '''simple docstring'''
        rank_zero_info('***** Test results *****')
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, 'test_results.txt')
        with open(output_test_results_file, 'w') as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))
                    writer.write('{} = {}\n'.format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '--output_dir', default=str(Path(root_dir).parent / 'test_run' / 'model_checkpoints'), type=str, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O2', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
    parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.')
    parser.add_argument(
        '--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument(
        '--data_dir', default=str(Path(root_dir).parent / 'test_run' / 'dummy-train-data'), type=str, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.', )
def generic_train(model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )
    if args.do_train:
        trainer.fit(model)
    else:
        print('RAG modeling tests with new set functions successfuly executed!')
    return trainer
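# A hedged usage sketch (added; not part of the original file): `BaseTransformer`
# leaves `get_dataloader` abstract, so a concrete task module mainly supplies
# data plus `training_step`/`validation_step`. The tiny tensor dataset below is
# hypothetical and only illustrates the hook.
import torch


class ExampleClassificationModule(BaseTransformer):
    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode="sequence-classification")

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        # a real task would build a dataset from self.hparams.data_dir and `type_path`
        dataset = torch.utils.data.TensorDataset(torch.zeros(8, 4))
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)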
| 316 | 0 |
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    '''simple docstring'''
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase : Any = "aab"
_UpperCamelCase : int = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
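    # A quick sanity check (added for illustration): the DP matcher should
    # agree with Python's own regex engine on full-string matches for this
    # restricted grammar of ".", "*" and literals.
    import re

    for s, p in [("aab", "c*a*b"), ("mississippi", "mis*is*p*."), ("ab", ".*")]:
        assert match_pattern(s, p) == bool(re.fullmatch(p, s))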
| 77 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop('add_bos_token', False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
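# Hedged usage sketch (added; not part of the original module). Inside an
# installed `transformers` environment the fast tokenizer is normally reached
# through the public API, e.g.:
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   ids = tok("hello world")["input_ids"]
#   assert tok.decode(ids) == "hello world"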
| 316 | 0 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints):
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
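    # A small illustrative run (added): radix sort handles non-negative
    # integers here and sorts in O(d * (n + RADIX)) for d-digit keys.
    sample = [170, 45, 75, 90, 802, 24, 2, 66]
    assert radix_sort(sample) == sorted([170, 45, 75, 90, 802, 24, 2, 66])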
| 78 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 316 | 0 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic, ) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
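    # An extra illustrative run (added): on an obstacle-free 2x2 grid with a
    # zero heuristic the search degenerates to uniform-cost search.
    tiny_grid = [[0, 0], [0, 0]]
    tiny_heuristic = [[0, 0], [0, 0]]
    tiny_path, _ = search(tiny_grid, [0, 0], [1, 1], 1, tiny_heuristic)
    print('tiny grid path:', tiny_path)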
| 79 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
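    # A round-trip sketch of `try_key` (added; the key and plaintext are made
    # up, not Project Euler's data): XOR-encrypt with a 3-letter key, then
    # recover the plaintext with the same key.
    demo_key = tuple(ord(c) for c in "abc")
    demo_cipher = [ord(c) ^ k for c, k in zip("the cat", cycle(demo_key))]
    assert try_key(demo_cipher, demo_key) == "the cat"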
| 316 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_center_crop=None, crop_size=None, do_flip_channel_order=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
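# Hedged usage sketch (added; not part of the original module). With
# `transformers` and PIL installed, the processor is normally used through
# the public API:
#
#   from transformers import MobileViTImageProcessor
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   batch = processor(images=pil_image, return_tensors="pt")  # pil_image: a PIL.Image
#   # batch["pixel_values"] has shape (1, 3, crop_height, crop_width), BGR order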
| 80 |
"""simple docstring"""
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
            f'Valid values are: {", ".join(speed_chart_inverse)}'
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
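    # A couple of illustrative conversions (added): 100 km/h in other units.
    print(convert_speed(100, "km/h", "m/s"))  # ~27.778
    print(convert_speed(100, "km/h", "mph"))  # ~62.137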
| 316 | 0 |
"""simple docstring"""
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 81 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        '''simple docstring'''
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        '''simple docstring'''
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 316 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        """simple docstring"""
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy')
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        """simple docstring"""
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                F'/dit/{word}_512.npy')
            assert np.abs((expected_image - image).max()) < 1e-1
| 82 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
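    # A quick check of the first few ugly numbers (added for illustration):
    # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
    assert [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]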
| 316 | 0 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
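# A small illustration (added): min-max normalization maps the data onto
# [0, 1], while standardization rescales to zero mean and unit (sample) stdev.
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # symmetric values around 0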
| 83 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs, ):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
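# Hedged usage sketch (added; not part of the original module): through the
# public API, the processor pairs text queries with images for OWL-ViT
# open-vocabulary detection.
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a cat", "a dog"]], images=pil_image, return_tensors="pt")
#   # inputs contains input_ids, attention_mask and pixel_values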
| 316 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase_ :Union[str, Any] = init_image.resize((768, 512) )
lowerCAmelCase_ :str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
lowerCAmelCase_ :Union[str, Any] = """BAAI/AltDiffusion"""
lowerCAmelCase_ :Any = AltDiffusionImgaImgPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCAmelCase_ :Tuple = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :int = torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = pipe(
prompt=__A , image=__A , strength=0.7_5 , guidance_scale=7.5 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 84 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class FlaxGPTJModelTest( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
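# A hedged usage sketch of the rope_scaling validation above (values are illustrative,
# not taken from this file):
#     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})   # accepted
#     GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})      # ValueError: type must be 'linear' or 'dynamic'
#     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1.0})   # ValueError: factor must be a float > 1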
| 85 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
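# Quick hand-check of the run above (added for illustration, not in the original file):
# i = 0 prints first; then start[1] = 3 >= finish[0] = 2, start[3] = 5 >= finish[1] = 4,
# and start[4] = 8 >= finish[3] = 7 are selected, so the script prints: 0,1,3,4,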
| 316 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = 'lilt'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 86 |
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('The given input must be positive')
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append('0' + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append('1' + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
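# Sanity check (hedged addition, not in the original file): for 3 bits the reflected
# Gray code is [0, 1, 3, 2, 6, 7, 5, 4] and consecutive values differ in exactly one bit:
#     assert gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]
#     assert all(bin(a ^ b).count("1") == 1 for a, b in zip(gray_code(3), gray_code(3)[1:]))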
| 316 | 0 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
if __name__ == "__main__":
main()
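# Example invocation (hedged; the script path and token are placeholder assumptions —
# upstream this runs on a scheduled GitHub Actions job):
#     GITHUB_TOKEN=<personal-access-token> python scripts/stale.py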
| 87 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    # NOTE: the HF-side attribute names (conv_pre, upsampler, resblocks, conv_post)
    # follow SpeechT5HifiGan's module layout; they are reconstructed here because the
    # original assignment targets were lost in this dump.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
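# Example invocation (hedged; the file names are illustrative assumptions, while the
# flags are the ones defined by the argparse setup above):
#     python convert_hifigan.py \
#         --checkpoint_path generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan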
| 88 |
"""simple docstring"""
def solution(max_base: int = 1_0, max_power: int = 2_2) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
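# Worked example of the counting rule above (added for illustration): 16807 = 7**5 is a
# 5-digit fifth power and 134217728 = 8**9 is a 9-digit ninth power, so both count.
# bases stops before 10 because 10**n always has n + 1 digits; solution() evaluates to 49.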
| 316 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
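# A hedged round-trip sketch of the to_dict override above (with the default backbone):
#     cfg = UperNetConfig()      # logs that the default ResNet backbone is being used
#     d = cfg.to_dict()          # the nested backbone config is serialized as a plain dict
#     assert d["backbone_config"]["model_type"] == "resnet"
#     assert d["model_type"] == "upernet"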
| 89 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(Rf'''^({joined_dirs}).*?\.py$''')

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset,
        name,
        con,
        batch_size=None,
        num_proc=None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='ba',
                disable=not logging.is_progress_bar_enabled(),
                desc='Creating SQL from Arrow format',
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating SQL from Arrow format',
                ):
                    written += num_rows
        return written
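# Hedged usage sketch (the database file and table name are illustrative assumptions):
#     import sqlite3
#     from datasets import Dataset
#     con = sqlite3.connect("my.db")
#     ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#     written = SqlDatasetWriter(ds, "my_table", con, batch_size=1000).write()
#     ds_roundtrip = SqlDatasetReader("SELECT * FROM my_table", con).read()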
| 90 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # Class name reconstructed for this dump; the rescale + symmetric pad-to-multiple
    # pipeline matches the Swin2SR image processor in transformers.
    model_input_names = ['pixel_values']

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format)

    def preprocess(
        self,
        images,
        do_rescale=None,
        rescale_factor=None,
        do_pad=None,
        pad_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
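# Worked example of the symmetric padding above (added for illustration): with
# pad_size = 8, a 13x17 image gets pad_height = (13 // 8 + 1) * 8 - 13 = 3 and
# pad_width = (17 // 8 + 1) * 8 - 17 = 7, i.e. it is mirror-padded up to 16x24.
# Note that an already-aligned dimension is still grown by a full multiple:
# (16 // 8 + 1) * 8 - 16 = 8.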
| 316 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DonutImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 91 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
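# Standalone sketch of the integration check above, runnable outside the test
# harness; it downloads the same small TF FlauBERT checkpoint the test uses.
def _tf_flaubert_inference_demo():
    import tensorflow as tf

    from transformers import TFFlaubertModel

    model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
    input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
    return model(input_ids)[0]  # last hidden state, shape (1, 8, 512)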
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a__ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DeiTForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForMaskedImageModeling(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_a : int = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a : Optional[Any] = False
_a : Tuple = False
_a : Tuple = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase = False
__lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__lowerCAmelCase = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
*get_values(_A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
__lowerCAmelCase = problem_type["title"]
__lowerCAmelCase = problem_type["num_labels"]
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
if problem_type["num_labels"] > 1:
__lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
__lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
__lowerCAmelCase = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _a ( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_A )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**_A )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
__lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="pt" )
__lowerCAmelCase = inputs.pixel_values.to(_A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowerCAmelCase = model(_A )
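# Condensed inference sketch distilled from the fp32 integration test above;
# `_a()` is the COCO fixture loader defined earlier in this module.
def _deit_inference_demo():
    from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

    processor = DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
    model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')
    inputs = processor(images=_a(), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, 1000) ImageNet logits
    return logits.argmax(-1).item()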
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    # Replace the last `occurrence` matches of `old` in `s` with `new`.
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')

        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')

        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
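# Example invocation (paths are placeholders -- point --checkpoint_path at a
# downloaded or hosted DALL-E dVAE encoder checkpoint):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook
#
# A remote --checkpoint_path is handled by torch.hub.load_state_dict_from_url
# inside convert_dalle_checkpoint above.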
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for rowa, rowa_expected in zip(original_sql, expected_sql):
        assert rowa == rowa_expected


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for rowa, rowa_expected in zip(original_sql, expected_sql):
        assert rowa == rowa_expected


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0).write()
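# Minimal round-trip sketch mirroring the tests above: load a SQL table into a
# Dataset and write it back out to a new sqlite file (paths are illustrative).
def _sql_round_trip_demo(sqlite_path: str, output_sqlite_path: str) -> None:
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()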
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize nested GenerationConfig values so the result is JSON-friendly.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
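# Usage sketch: enable generate()-based evaluation and serialize the arguments.
# The values are illustrative and `output_dir` is a placeholder path.
def _seq2seq_args_demo() -> dict:
    args = Seq2SeqTrainingArguments(
        output_dir='./outputs',
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )
    return args.to_dict()  # nested GenerationConfig values come back as plain dicts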
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
snake_case : List[str] = False
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :Optional[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :str = 12
a :Optional[Any] = 12
a :Dict = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
a :Dict = TransformeraDModel(**_lowerCamelCase )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = '''cpu'''
a :Optional[int] = self.dummy_vqvae
a :Any = self.dummy_text_encoder
a :Tuple = self.dummy_tokenizer
a :List[str] = self.dummy_transformer
a :str = VQDiffusionScheduler(self.num_embed )
a :Optional[int] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
a :Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , transformer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
a :List[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :List[str] = '''teddy bear playing in the pool'''
a :Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
a :List[str] = pipe([prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
a :List[str] = output.images
a :List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
a :Any = pipe(
[prompt] , generator=_lowerCamelCase , output_type='''np''' , return_dict=_lowerCamelCase , num_inference_steps=2 )[0]
a :List[Any] = image[0, -3:, -3:, -1]
a :Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
a :int = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = '''cpu'''
a :Union[str, Any] = self.dummy_vqvae
a :Optional[Any] = self.dummy_text_encoder
a :int = self.dummy_tokenizer
a :Optional[int] = self.dummy_transformer
a :Optional[int] = VQDiffusionScheduler(self.num_embed )
a :str = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
a :Tuple = VQDiffusionPipeline(
vqvae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , transformer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
a :int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Optional[Any] = '''teddy bear playing in the pool'''
a :List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
a :Tuple = pipe([prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
a :Union[str, Any] = output.images
a :Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
a :int = pipe(
[prompt] , generator=_lowerCamelCase , output_type='''np''' , return_dict=_lowerCamelCase , num_inference_steps=2 )[0]
a :Union[str, Any] = image[0, -3:, -3:, -1]
a :List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
a :Optional[int] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
a :Any = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
a :List[Any] = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
a :Dict = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
a :Any = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_lowerCamelCase , output_type='''np''' , )
a :Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
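# Standalone sketch of the slow test above: text-to-image generation with the
# released ITHQ VQ-Diffusion checkpoint (a GPU is assumed for reasonable speed).
def _vq_diffusion_demo():
    pipeline = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''')
    pipeline = pipeline.to(torch_device)
    generator = torch.Generator(device=torch_device).manual_seed(0)
    output = pipeline('''teddy bear playing in the pool''', generator=generator, output_type='''np''')
    return output.images[0]  # (256, 256, 3) array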
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash table with linear probing and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Place the item at `ind` if the slot is free or already holds this key.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ' ,'.join(f'{item.key}: {item.val}' for item in self._buckets if item)
        return f'HashMap({val_string})'
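# Minimal usage sketch of the HashMap above: inserts trigger a resize once the
# load factor (default 0.75) is exceeded, and deletions leave `_deleted`
# tombstones so later probe sequences keep working.
def _hash_map_demo() -> None:
    hm: HashMap[str, int] = HashMap()
    hm["one"] = 1
    hm["two"] = 2
    hm["one"] = 11          # overwrite in place, length stays 2
    assert len(hm) == 2 and hm["one"] == 11
    del hm["two"]           # leaves a tombstone, probing still finds "one"
    assert "two" not in hm and hm["one"] == 11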
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Ensure the handler values reach the torch GradScaler built by Accelerator.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
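# Sketch of the handler mechanism exercised above: `to_kwargs()` returns only
# the fields that differ from their dataclass defaults, which Accelerator then
# forwards to the wrapped torch object.
def _kwargs_handler_demo() -> dict:
    handler = MockClass(a=2, c=2.25)
    return handler.to_kwargs()  # {'a': 2, 'c': 2.25} -- only non-default fields survive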
"""simple docstring"""
def different_signs(num_a: int, num_b: int) -> bool:
    # Two's-complement trick: the XOR of two integers is negative exactly
    # when their sign bits differ.
    return num_a ^ num_b < 0
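# Worked examples of the XOR sign trick:
def _different_signs_demo() -> None:
    assert different_signs(1, -1) is True
    assert different_signs(1, 1) is False
    assert different_signs(-68, -8) is False
    assert different_signs(0, 5) is False  # zero's sign bit is clear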
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f'''.{module_name}''', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.'
        )
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    '''
    Factory class that instantiates the feature extractor matching a checkpoint via `from_pretrained`;
    it cannot be instantiated directly.
    '''

    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}''')

    @staticmethod
    def register(config_class, feature_extractor_class):
        # Map a config class to a feature extractor class in the auto mapping.
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
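# Usage sketch: resolve the matching feature extractor straight from a Hub
# checkpoint id ("facebook/wav2vec2-base-960h" is a real id, used here purely
# for illustration).
def _auto_feature_extractor_demo():
    extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
    return extractor  # a Wav2Vec2FeatureExtractor instance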
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the variance-exploding SDE (predictor-corrector) sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
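# Sampling sketch for the pipeline above ("google/ncsnpp-church-256" is an
# existing score-SDE checkpoint on the Hub; the step count is reduced well
# below the 2000-step default purely for a quick demo, at the cost of quality):
def _score_sde_ve_demo():
    pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256')
    image = pipe(num_inference_steps=100).images[0]
    return image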
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ :int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ :int = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
UpperCamelCase__ :Union[str, Any] = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
UpperCamelCase__ :Union[str, Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
UpperCamelCase__ :Any = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ :Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_tokenizer()
UpperCamelCase__ :Dict = self.get_rust_tokenizer()
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ :str = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ :Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
UpperCamelCase__ :List[str] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :str = self.get_tokenizer()
UpperCamelCase__ :List[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :str = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :Dict = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
UpperCamelCase__ :List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[str] = '''lower newer'''
UpperCamelCase__ :str = processor(text=UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.get_image_processor()
UpperCamelCase__ :Tuple = self.get_tokenizer()
UpperCamelCase__ :List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Tuple = '''lower newer'''
UpperCamelCase__ :List[Any] = self.prepare_image_inputs()
UpperCamelCase__ :List[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.get_image_processor()
UpperCamelCase__ :Any = self.get_tokenizer()
UpperCamelCase__ :Dict = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ :List[Any] = processor(images=UpperCamelCase_ , visual_prompt=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
UpperCamelCase__ :Optional[int] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ :Union[str, Any] = processor.batch_decode(UpperCamelCase_ )
UpperCamelCase__ :Dict = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 97 |
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
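

# Quick sanity check (illustrative values only):
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)  -> True   (4 + 5 == 9)
#   is_sum_subset([3, 34, 4, 12, 5, 2], 30) -> False  (no subset reaches 30)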
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
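

# Worked example: a pixel box (left=228, top=345, right=625, bottom=380) on a
# 1250x1000 page maps to [182, 345, 500, 380] on the 0-1000 scale used by
# LayoutLM-style models.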
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMvaImageProcessor(BaseImageProcessor):
    """Image processor for LayoutLM-style document models: resize, rescale, normalize, and optional OCR."""

    model_input_names = ["pixel_values"]
def __init__( self : int ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = "" ,**lowerCamelCase__ : Any ,):
super().__init__(**lowerCamelCase__ )
UpperCAmelCase__ = size if size is not None else {'height': 224, 'width': 224}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_value
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCAmelCase__ = apply_ocr
UpperCAmelCase__ = ocr_lang
UpperCAmelCase__ = tesseract_config
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : str ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCAmelCase__ = (size['height'], size['width'])
return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,):
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, Iterable[float]] ,lowerCamelCase__ : Union[float, Iterable[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Optional[int] ,):
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : str ,):
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCAmelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCAmelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCAmelCase__ = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ = [to_numpy_array(lowerCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self ,'pytesseract' )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for image in images:
UpperCAmelCase__ , UpperCAmelCase__ = apply_tesseract(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
UpperCAmelCase__ = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase__ = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase__ = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images]
UpperCAmelCase__ = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
UpperCAmelCase__ = BatchFeature(data={'pixel_values': images} ,tensor_type=lowerCamelCase__ )
if apply_ocr:
UpperCAmelCase__ = words_batch
UpperCAmelCase__ = boxes_batch
return data
| 98 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
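# Example: passing `--lr_scheduler linear` on the command line selects
# get_linear_schedule_with_warmup through the `arg_to_scheduler` map above.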
class BaseTransformer(pl.LightningModule):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback(pl.Callback):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # prints the parameters whose gradient is still None after a backward pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class LoggingCallback(pl.Callback):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
| 316 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 99 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
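

# Usage sketch (illustrative; assumes the public `gpt2` checkpoint is reachable):
#   tokenizer = GPTaTokenizerFast.from_pretrained("gpt2")
#   input_ids = tokenizer("Hello world")["input_ids"]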
| 316 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode

    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 316 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowercase__ :List[Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowercase__ :List[Any] = parser.parse_args()
lowercase__ :List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowercase__ :Dict = CLIPImageProcessor()
lowercase__ :Dict = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowercase__ :Any = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 101 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
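

# Scale note: product(LOWERCASE_INTS, repeat=3) enumerates 26**3 = 17,576 candidate
# three-letter keys, which is why exhaustive XOR decryption is practical here.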
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316 | 0 |
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix filled with 1..row_size**2 (defaults to 4x4)."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 102 |
"""simple docstring"""
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
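

# Quick checks against the tables above:
#   convert_speed(100, "km/h", "m/s") -> 27.778
#   convert_speed(100, "km/h", "mph") -> 62.137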
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 103 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def SCREAMING_SNAKE_CASE ( self : str ,**lowercase__ : Union[str, Any] ):
__lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase__ )
return config
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any=0 ,**lowercase__ : Union[str, Any] ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config(**lowercase__ )
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__lowercase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : int ):
pass
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Optional[int]=0 ,**lowercase__ : str ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__lowercase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__lowercase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : str ,**lowercase__ : Optional[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowercase__ )
__lowercase = scheduler_class(**lowercase__ )
__lowercase = 1_0
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
__lowercase = model(lowercase__ ,lowercase__ )
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__lowercase = model(lowercase__ ,lowercase__ )
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ ,'''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ ,'''set_timesteps''' ):
__lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = scheduler.step_prk(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
__lowercase = scheduler.step_plms(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = scheduler.step_plms(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def SCREAMING_SNAKE_CASE ( self : int ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase__ )
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(steps_offset=1 )
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) ,)
def SCREAMING_SNAKE_CASE ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase__ ,beta_end=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# an earlier version of set_timesteps() raised an error when indexing the alphas with a number of inference steps that is a power of 3
__lowercase = 2_7
for scheduler_class in self.scheduler_classes:
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# before the power-of-3 fix this would error on the first step, so running two steps is enough
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
def SCREAMING_SNAKE_CASE ( self : Tuple ):
with self.assertRaises(lowercase__ ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.full_loop()
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.full_loop(prediction_type='''v_prediction''' )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : str ):
# We specify a different beta_start so that the first alpha is 0.99
__lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : str ):
# We specify a different beta_start so that the first alpha is 0.99
__lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
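# A minimal sketch of the two-phase sampling loop that the full_loop helper
# above exercises (illustrative only; `scheduler` and `model` stand in for the
# objects built by the test, and the PRK/PLMS split follows the PNDM design):
#
#     scheduler.set_timesteps(num_inference_steps)
#     for t in scheduler.prk_timesteps:    # Runge-Kutta warm-up steps
#         sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
#     for t in scheduler.plms_timesteps:   # linear multistep refinement
#         sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample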
| 104 |
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
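# Worked trace of the three-pointer merge above, for clarity:
#   ugly = [1]        candidates (2, 3, 5) -> append 2, advance the *2 pointer
#   ugly = [1, 2]     candidates (4, 3, 5) -> append 3, advance the *3 pointer
#   ugly = [1, 2, 3]  candidates (4, 6, 5) -> append 4, ...
# The first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the
# function returns 12 for an input of 10.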
| 316 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
    SpectrogramDiffusionPipeline,
    TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
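# The try/except blocks above implement the soft-dependency pattern: when a
# backend is missing, the real classes are swapped for dummy placeholders that
# only raise an informative error once they are actually instantiated.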
| 105 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
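# Hedged usage sketch (the checkpoint id and query strings are illustrative,
# not taken from this file):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a cat", "a dog"], ["a bird"]],
#                        images=image, return_tensors="pt")
#
# The shorter query list is padded with " " entries so that every sample in
# the batch shares the same max_num_queries, as implemented in __call__ above.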
| 316 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Tuple = tempfile.mkdtemp()
lowerCAmelCase__ : List[Any] = BlipImageProcessor()
lowerCAmelCase__ : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
lowerCAmelCase__ : Optional[Any] = BlipaProcessor(lowercase_ ,lowercase_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ,**lowercase_ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowercase_ ).tokenizer
def __lowerCAmelCase ( self : Union[str, Any] ,**lowercase_ : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowercase_ ).image_processor
def __lowerCAmelCase ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Union[str, Any] = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta )]
lowerCAmelCase__ : Any = [Image.fromarray(np.moveaxis(lowercase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : str = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
lowerCAmelCase__ : Optional[int] = self.get_image_processor(do_normalize=lowercase_ ,padding_value=1.0 )
lowerCAmelCase__ : List[str] = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=lowercase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowercase_ )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Tuple = self.get_image_processor()
lowerCAmelCase__ : List[Any] = self.get_tokenizer()
lowerCAmelCase__ : Tuple = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ )
lowerCAmelCase__ : int = self.prepare_image_inputs()
lowerCAmelCase__ : Dict = image_processor(lowercase_ ,return_tensors='''np''' )
lowerCAmelCase__ : Dict = processor(images=lowercase_ ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Tuple = self.get_image_processor()
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase__ : Optional[Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ )
lowerCAmelCase__ : Any = '''lower newer'''
lowerCAmelCase__ : Optional[Any] = processor(text=lowercase_ )
lowerCAmelCase__ : Dict = tokenizer(lowercase_ ,return_token_type_ids=lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ : Optional[Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ )
lowerCAmelCase__ : Any = '''lower newer'''
lowerCAmelCase__ : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase__ : List[Any] = processor(text=lowercase_ ,images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Tuple = self.get_image_processor()
lowerCAmelCase__ : Tuple = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ )
lowerCAmelCase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ : Dict = processor.batch_decode(lowercase_ )
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase__ : Dict = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ )
lowerCAmelCase__ : List[Any] = '''lower newer'''
lowerCAmelCase__ : Tuple = self.prepare_image_inputs()
lowerCAmelCase__ : List[Any] = processor(text=lowercase_ ,images=lowercase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
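# Note: for now the processor's output is pinned to exactly
# ['pixel_values', 'input_ids', 'attention_mask'], i.e. the union of the image
# processor's and the tokenizer's outputs; this is what the two
# assertListEqual checks above verify.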
| 106 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
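# Minimal sketch of the cached-decoding equivalence asserted by the two
# check_use_cache_* helpers above (illustrative pseudocode, not test code):
#
#     cache = model.init_cache(batch_size, max_decoder_length)
#     out = model(ids[:, :-1], past_key_values=cache, position_ids=pos)
#     nxt = model(ids[:, -1:], past_key_values=out.past_key_values, ...)
#     full = model(ids)
#     assert max |nxt.logits[:, -1] - full.logits[:, -1]| < 1e-3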
| 316 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : Any=32 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Tuple=[0, 1, 2, 3] , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Union[str, Any]=37 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=[1, 3_84, 24, 24] , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=None , ) -> Optional[int]:
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = backbone_out_indices
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = num_labels
a = backbone_featmap_shape
a = scope
a = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a = (image_size // patch_size) ** 2
a = num_patches + 1
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 1_92, 3_84, 7_68],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = DPTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> str:
a = self.num_labels
a = DPTForDepthEstimation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple ) -> Any:
a = self.num_labels
a = DPTForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase ( self : str ) -> str:
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : str = False
def __UpperCAmelCase ( self : int ) -> List[str]:
a = DPTModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : int ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
if model_class in get_values(__lowerCamelCase ):
continue
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
a = model(**__lowerCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self : Dict ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = False
a = True
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
a = model(**__lowerCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
a = model_class(config=__lowerCamelCase )
# Skip the check for the backbone
a = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@slow
def __UpperCAmelCase ( self : str ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a = DPTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = "add"
with self.assertRaises(__lowerCamelCase ):
a = DPTForDepthEstimation(__lowerCamelCase )
def __magic_name__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
a = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__lowerCamelCase )
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
a = outputs.predicted_depth
# verify the predicted depth
a = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __lowerCamelCase )
a = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __lowerCamelCase , atol=1e-4 ) )
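# Hedged end-to-end sketch mirroring the integration test above (variable
# names are illustrative; the checkpoint id is the one used in the test):
#
#     processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
#     model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         depth = model(**inputs).predicted_depth   # shape (1, 384, 384)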
| 107 |
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity starts at or after the finish time of the
# previously selected activity, select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
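# For the sample data above (note that the greedy rule assumes activities are
# already sorted by finish time), the printed output is "0,1,3,4,".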
| 316 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase ):
"""simple docstring"""
a : str =["transformers", "torch", "note_seq"]
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def lowercase__ ( cls , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def lowercase__ ( cls , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 108 |
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is recursive
# base cases are reached when n = 0 or n = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
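# Cross-check: the i-th reflected Gray code can also be computed in closed
# form as i ^ (i >> 1); for bit_count = 2 both this formula and the recursive
# construction above give [0, 1, 3, 2].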
| 316 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A: Union[str, Any] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Any = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: str = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
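# At runtime the module object is replaced by `_LazyModule`, so the heavy
# framework imports declared in `_import_structure` are resolved only on first
# attribute access; the TYPE_CHECKING branch exists solely for static type
# checkers and IDEs.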
| 109 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> "Image.Image":
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 316 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast mBART tokenizer (backed by HuggingFace's *tokenizers* library). The source
    language code is appended after the EOS token: "X [eos] [src_lang_code]".
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset special tokens to the target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
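
# A minimal usage sketch, kept as comments since it downloads a real checkpoint
# (the sample sentence is arbitrary):
#
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#
# Per set_src_lang_special_tokens above, batch["input_ids"] ends with
# [eos_token_id, <en_XX code>] rather than starting with the language code.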
| 177 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive integers that are n digits in length and also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
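

# Project Euler 63: a number such as 16807 = 7**5 is itself 5 digits long, so it
# is counted. With the default limits of 10 and 22 the expected result is 49.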
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 316 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Sum, per resource, what is currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Available resources = claim vector - currently allocated resources."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        """Per-process need = maximum claim - allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each need list back to the index of its process."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        """Simulate the Banker's algorithm until all processes run or the state is unsafe."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
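
# A hypothetical demonstration run using the sample tables above (any keyword
# argument set to True makes main() print the pretty tables first):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)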
| 292 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 0 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
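
# Hypothetical CLI invocation (the checkpoint URL is assumed to point at the
# public DALL-E encoder weights; the output path is a placeholder):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#         --pytorch_dump_folder_path ./flava-codebook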
| 216 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
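
# Padding sketch: with the default pad_size of 8, a 13x13 image grows to the next
# multiple of 8 per side, i.e. (13 // 8 + 1) * 8 - 13 = 3 extra rows and columns
# of symmetric padding. A hypothetical check (NumPy input, rescaling disabled):
#
#     processor = Swin2SRImageProcessor(do_rescale=False)
#     processor.pad(np.zeros((3, 13, 13)), size=8).shape  # -> (3, 16, 16)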
| 316 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Numerically simple logistic sigmoid, applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
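
# Quick vectorized sanity check for the two activations above (illustrative
# values; sigmoid(±1) ≈ 0.731 / 0.269):
if __name__ == "__main__":
    _sample = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(_sample))
    print(gaussian_error_linear_unit(_sample))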
| 184 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 316 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
@cached_property
    def resolver(self):
        """Build a converter that writes into a temporary directory."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 167 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')

        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')

        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name

    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 136 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize, replacing any nested `GenerationConfig` with its dict representation."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
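
# Usage sketch (field values are illustrative only): enabling beam-search
# generation during evaluation.
#
#     training_args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )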
| 316 | 0 |
"""simple docstring"""
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of the DAG, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
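
# For the sample DAG above, Kahn's algorithm dequeues vertices in the order
# [0, 1, 2, 3, 4, 5], so the call prints exactly that list.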
| 290 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
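

# A minimal sanity-check sketch for the HashMap above (ad-hoc keys and values):
if __name__ == "__main__":
    hm = HashMap()
    hm["key_a"] = 1
    hm["key_b"] = 2
    assert hm["key_a"] == 1 and len(hm) == 2
    del hm["key_a"]
    assert "key_a" not in list(hm)  # deleted tombstones are skipped on iteration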
| 316 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
    DECODER_CONVERSION_MAPPING = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 256 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
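

# Sign-bit trick: the XOR of two ints is negative exactly when one operand is
# negative and the other is not. Illustrative values:
#     different_signs(1, -1)  -> True
#     different_signs(-2, -1) -> False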
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def _lowerCAmelCase (cls :Dict )-> int:
__A = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def _lowerCAmelCase (cls :Tuple )-> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def _lowerCAmelCase (self :List[str] )-> Optional[int]:
__A = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__A = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCAmelCase , repo_id='''test-config''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__A = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def _lowerCAmelCase (self :int )-> int:
__A = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__A = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__UpperCAmelCase , repo_id='''valid_org/test-config-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__A = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def _lowerCAmelCase (self :Any )-> Union[str, Any]:
CustomConfig.register_for_auto_class()
__A = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__A = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class A_ ( unittest.TestCase ):
def _lowerCAmelCase (self :Any )-> Dict:
__A = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__A = c.n_embd + 1 # int
__A = c.resid_pdrop + 1.0 # float
__A = not c.scale_attn_weights # bool
__A = c.summary_type + '''foo''' # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(__UpperCAmelCase , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(__UpperCAmelCase , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(__UpperCAmelCase , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(__UpperCAmelCase , c.summary_type , '''mismatch for key: summary_type''' )
def _lowerCAmelCase (self :Optional[int] )-> Any:
__A = PretrainedConfig()
__A = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__UpperCAmelCase , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__A = [key for key, value in config_common_kwargs.items() if value == getattr(__UpperCAmelCase , __UpperCAmelCase )]
if len(__UpperCAmelCase ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f""" {', '.join(__UpperCAmelCase )}.""" )
def _lowerCAmelCase (self :str )-> Optional[int]:
with self.assertRaises(__UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__A = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__A = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(__UpperCAmelCase )
def _lowerCAmelCase (self :int )-> Any:
__A = mock.Mock()
__A = 500
__A = {}
__A = HTTPError
__A = {}
# Download this model to make sure it's in the cache.
__A = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=__UpperCAmelCase ) as mock_head:
__A = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase (self :Tuple )-> Any:
__A = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def _lowerCAmelCase (self :List[Any] )-> Any:
__A = AutoConfig.from_pretrained('''bert-base-cased''' )
__A = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__UpperCAmelCase )
__A = 2
json.dump(configuration.to_dict() , open(os.path.join(__UpperCAmelCase , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__A = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__A = ['''config.42.0.0.json''']
__A = 768
configuration.save_pretrained(__UpperCAmelCase )
shutil.move(os.path.join(__UpperCAmelCase , '''config.4.0.0.json''' ) , os.path.join(__UpperCAmelCase , '''config.42.0.0.json''' ) )
__A = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 768 )
def _lowerCAmelCase (self :Tuple )-> List[Any]:
__A = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__A = '''v4.0.0'''
__A , __A = new_transformers.models.auto.AutoConfig.from_pretrained(
__UpperCAmelCase , return_unused_kwargs=__UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__UpperCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__A = '''v3.0.0'''
__A = old_transformers.models.auto.AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 768 )
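# Hedged demo (editorial addition): the `update_from_string` contract exercised
# above, shown in isolation -- `GPT2Config` is the upstream name behind this
# file's `GPTaConfig` alias.
#
#   from transformers import GPT2Config
#   c = GPT2Config()
#   c.update_from_string("n_embd=1024,resid_pdrop=0.2,scale_attn_weights=False")
#   assert c.n_embd == 1024 and c.resid_pdrop == 0.2
#   assert c.scale_attn_weights is False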
| 117 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
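# Hedged usage sketch (editorial addition): driving the pipeline above with a
# public score-SDE checkpoint. "google/ncsnpp-church-256" is one known
# compatible model id; the call downloads large weights and is slow on CPU.
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")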
| 316 | 0 |
'''simple docstring'''
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole-valued decimal number to its hexadecimal string form."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
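# Editorial demo (an addition): quick sanity checks for the converter above,
# covering a positive and a negative input.
if __name__ == "__main__":
    assert decimal_to_hexadecimal(255) == "0xff"
    assert decimal_to_hexadecimal(-256) == "-0x100"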
| 239 |
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Classic subset-sum DP: subset[i][j] is True iff some subset of the first
    i elements of `arr` sums to exactly j."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
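# Editorial demo (an addition): two quick checks of the subset-sum table above.
if __name__ == "__main__":
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True   # 9 = 4 + 5
    assert is_sum_subset([2, 4, 6, 8], 30) is False         # elements only sum to 20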
| 316 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (chi_squared_statistic, decrypted_with_shift)

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher)
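# Editorial demo (an addition): recovering a Caesar shift of 7. The function
# returns (best shift, its chi-squared score, the decoded plaintext).
if __name__ == "__main__":
    best_shift, _chi, plaintext = decrypt_caesar_with_chi_squared(
        "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    )
    assert best_shift == 7
    assert plaintext.startswith("why is the caesar cipher")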
| 87 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback(pl.Callback):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback(pl.Callback):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class LoggingCallback(pl.Callback):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
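# Hedged wiring sketch (editorial addition): how BaseTransformer,
# add_generic_args and generic_train above are meant to compose. The
# SummarizationModule subclass is a hypothetical example, not part of this file.
#
#   class SummarizationModule(BaseTransformer):
#       mode = "summarization"
#
#       def __init__(self, hparams):
#           super().__init__(hparams, mode=self.mode)
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = SummarizationModule(args)
#   trainer = generic_train(model, args)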
| 316 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=[0, 1, 2, 3] , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=[1, 384, 24, 24] , _UpperCAmelCase=True , _UpperCAmelCase=None , ):
lowercase__: List[Any] = parent
lowercase__: Any = batch_size
lowercase__: Union[str, Any] = image_size
lowercase__: str = patch_size
lowercase__: str = num_channels
lowercase__: List[Any] = is_training
lowercase__: Any = use_labels
lowercase__: Optional[int] = hidden_size
lowercase__: Any = num_hidden_layers
lowercase__: int = backbone_out_indices
lowercase__: Union[str, Any] = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: str = hidden_act
lowercase__: Optional[int] = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: Any = initializer_range
lowercase__: List[str] = num_labels
lowercase__: Optional[Any] = backbone_featmap_shape
lowercase__: Dict = scope
lowercase__: List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase__: Tuple = (image_size // patch_size) ** 2
lowercase__: Optional[int] = num_patches + 1
def _snake_case ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
def _snake_case ( self ):
lowercase__: Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[int] = DPTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = self.num_labels
lowercase__: Tuple = DPTForDepthEstimation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase__: Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = self.num_labels
lowercase__: Dict = DPTForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase__: Any = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase (__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_UpperCAmelCase :Any = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
lowercase__: int = DPTModelTester(self )
lowercase__: Any = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
lowercase__, lowercase__: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Any = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def _snake_case ( self ):
lowercase__, lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Union[str, Any] = model_class(__UpperCAmelCase )
lowercase__: List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: Tuple = [*signature.parameters.keys()]
lowercase__: Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def _snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: List[Any] = True
if model_class in get_values(__UpperCAmelCase ):
continue
lowercase__: Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowercase__: List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
lowercase__: Dict = model(**__UpperCAmelCase ).loss
loss.backward()
def _snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__, lowercase__: str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: Any = False
lowercase__: Tuple = True
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
lowercase__: Tuple = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowercase__: Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
lowercase__: str = model(**__UpperCAmelCase ).loss
loss.backward()
def _snake_case ( self ):
lowercase__, lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: int = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase__: Any = model_class(config=__UpperCAmelCase )
# Skip the check for the backbone
lowercase__: Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase__: Optional[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase__: Optional[int] = DPTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _snake_case ( self ):
lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: Tuple = '''add'''
with self.assertRaises(__UpperCAmelCase ):
lowercase__: str = DPTForDepthEstimation(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Tuple = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
lowercase__: str = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__UpperCAmelCase )
lowercase__: str = prepare_img()
lowercase__: Any = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__: List[str] = model(**__UpperCAmelCase )
lowercase__: Optional[int] = outputs.predicted_depth
# verify the predicted depth
lowercase__: Dict = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCAmelCase )
lowercase__: Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCAmelCase , atol=1e-4 ) )
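# Hedged usage sketch (editorial addition): the slow-test inference path above,
# run outside the harness; downloads the Intel/dpt-hybrid-midas weights on
# first use.
#
#   from transformers import DPTForDepthEstimation, DPTImageProcessor
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       depth = model(**inputs).predicted_depth  # shape (1, 384, 384)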
| 177 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
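# Hedged usage sketch (editorial addition): the `add_prefix_space` behaviour
# this class wires up, shown with the public `transformers.GPT2TokenizerFast`
# (the upstream counterpart of the class above; downloads the small gpt2
# tokenizer files on first use).
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   enc = tok(["Hello", "world"], is_split_into_words=True)
#   # without add_prefix_space=True, the pretokenized-input assert above fires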
| 316 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase = GPTaTokenizer
UpperCamelCase = GPTaTokenizerFast
UpperCamelCase = True
UpperCamelCase = {'''add_prefix_space''': True}
UpperCamelCase = False
def lowerCamelCase ( self :List[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
A = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
A = {"unk_token": "<unk>"}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
def lowerCamelCase ( self :Dict , **__UpperCamelCase :Any ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowerCamelCase ( self :List[Any] , **__UpperCamelCase :Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Any ):
A = "lower newer"
A = "lower newer"
return input_text, output_text
def lowerCamelCase ( self :int ):
A = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A = "lower newer"
A = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
A = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
A = tokens + [tokenizer.unk_token]
A = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self :Optional[int] ):
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase )
A = "lower newer"
# Testing tokenization
A = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
A = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids without special tokens
A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
A = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids with special tokens
A = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase )
A = tokenizer.encode(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
A = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing the unknown token
A = tokens + [rust_tokenizer.unk_token]
A = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self :Optional[Any] , *__UpperCamelCase :Any , **__UpperCamelCase :Union[str, Any] ):
pass
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Optional[int]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# Simple input
A = "This is a simple input"
A = ["This is a simple input 1", "This is a simple input 2"]
A = ("This is a simple input", "This is a pair")
A = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , )
def lowerCamelCase ( self :Optional[Any] ):
A = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
A = "This is a simple input"
A = ["This is a simple input looooooooong", "This is a simple input"]
A = ("This is a simple input", "This is a pair")
A = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
A = tokenizer.pad_token_id
A = tokenizer(__UpperCAmelCase , padding="max_length" , max_length=30 , return_tensors="np" )
A = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors="np" )
A = tokenizer(*__UpperCAmelCase , padding="max_length" , max_length=60 , return_tensors="np" )
A = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase ( self :Any ):
A = "$$$"
A = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__UpperCAmelCase , add_bos_token=__UpperCAmelCase )
A = "This is a simple input"
A = ["This is a simple input 1", "This is a simple input 2"]
A = tokenizer.bos_token_id
A = tokenizer(__UpperCAmelCase )
A = tokenizer(__UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , __UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A = tokenizer.decode(out_s.input_ids )
A = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCamelCase ( self :List[str] ):
pass
def lowerCamelCase ( self :List[Any] ):
A = [self.get_tokenizer(do_lower_case=__UpperCAmelCase , add_bos_token=__UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
A = "Encode this."
A = "This one too please."
A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
encoded_sequence += tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
A = tokenizer.encode_plus(
__UpperCAmelCase , __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , )
A = encoded_sequence_dict["input_ids"]
A = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
A = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__UpperCAmelCase )
]
A = [x for x in filtered_sequence if x is not None]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :int ):
A = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__UpperCAmelCase )
A = "A photo of a cat"
A = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("test_opt" )
A = AutoTokenizer.from_pretrained("./test_opt" )
A = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowerCamelCase ( self :Optional[Any] ):
A = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__UpperCAmelCase )
A = "A photo of a cat"
A = tokenizer.encode(
__UpperCAmelCase , )
# Same as above
self.assertEqual(__UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def lowerCamelCase ( self :List[str] ):
A = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__UpperCAmelCase )
A = "bos"
A = tokenizer.get_vocab()["bos"]
A = "A photo of a cat"
A = tokenizer.encode(
__UpperCAmelCase , )
# We changed the bos token
self.assertEqual(__UpperCAmelCase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("./tok" )
A = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
A = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 292 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 316 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet")
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond")
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
__a : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : List[str] = self.get_dummy_components()
__a : int = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : Any = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Any = self.get_dummy_inputs(__UpperCAmelCase )
__a : List[str] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__a : Any = image[0, -3:, -3:, -1]
__a : Tuple = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
__a : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Any = self.get_dummy_components(class_cond=__UpperCAmelCase )
__a : List[str] = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : Dict = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : str = self.get_dummy_inputs(__UpperCAmelCase )
__a : Any = 0
__a : List[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__a : Tuple = image[0, -3:, -3:, -1]
__a : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_onestep(self):
__a : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : List[str] = self.get_dummy_components()
__a : Optional[Any] = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : Tuple = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : List[Any] = self.get_dummy_inputs(__UpperCAmelCase )
__a : str = 1
__a : List[str] = None
__a : List[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__a : Optional[Any] = image[0, -3:, -3:, -1]
__a : Dict = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
__a : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Optional[Any] = self.get_dummy_components(class_cond=__UpperCAmelCase )
__a : Optional[int] = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : List[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
__a : Optional[int] = 1
__a : str = None
__a : Tuple = 0
__a : List[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__a : Optional[int] = image[0, -3:, -3:, -1]
__a : Optional[int] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        __a : List[str] = UNet2DModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__a : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
__a : List[Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : List[str] = self.get_inputs()
__a : Tuple = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__a : List[str] = image[0, -3:, -3:, -1]
__a : Any = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_consistency_model_cd_onestep(self):
        __a : Dict = UNet2DModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__a : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
__a : Union[str, Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Any = self.get_inputs()
__a : Dict = 1
__a : Any = None
__a : Dict = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__a : Optional[int] = image[0, -3:, -3:, -1]
__a : str = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        __a : List[str] = UNet2DModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__a : int = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
__a : List[str] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
        pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Optional[int] = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
__a : Tuple = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__a : int = image[0, -3:, -3:, -1]
__a : List[Any] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        __a : Tuple = UNet2DModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__a : str = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
__a : int = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
        pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Tuple = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
__a : Tuple = 1
__a : Tuple = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
__a : Tuple = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__a : str = image[0, -3:, -3:, -1]
__a : Union[str, Any] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 216 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
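# Project Euler 59: the ciphertext in p059_cipher.txt was XOR-encrypted with a
# repeating three-letter lowercase key. Brute-force all 26**3 keys, keep only
# decryptions made of printable characters, then narrow them down with common
# English words.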
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for Mersenne numbers: 2**p - 1 is prime iff
    s(p-2) == 0, where s(0) = 4 and s(i+1) = (s(i)**2 - 2) mod (2**p - 1).
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
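    # Expected: True (2**7 - 1 = 127 is prime) and False (2**11 - 1 = 2047 = 23 * 89).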
| 184 |
"""simple docstring"""
# speed_chart[u] is the number of km/h in one u; speed_chart_inverse[u] is the
# number of u in one km/h.
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
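# Example: convert_speed(100, "km/h", "m/s") -> 27.778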
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Gaussian Error Linear Unit: x * Phi(x), with Phi the standard normal CDF.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """
    Smoother, tanh-based approximation of the GELU (https://arxiv.org/abs/1606.08415).
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the possible GELU outputs to the range [-10, 10], which is useful for quantization.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: split the input in two halves A and B along `axis` and return A * sigmoid(B).
    """
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 167 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
'''simple docstring'''
return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components(self):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16(self):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1)
    def test_attention_slicing_forward_pass(self):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local(self):
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316 | 0 |
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps an existing text config and adds the hidden size of the other modality."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 136 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    # Three-pointer merge: each candidate is the smallest of the next multiples
    # of 2, 3 and 5 of previously generated ugly numbers.
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
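# The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (numbers whose only
# prime factors are 2, 3 and 5).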
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 316 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for audio diffusion.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: list, steps: int = 50) -> np.ndarray:
        """
        Reverse the denoising loop to recover the noise that generates the given images (DDIM only).
        """
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
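    # slerp is typically used by callers to interpolate smoothly between two
    # encoded noise tensors before decoding them back to audio.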
| 290 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to OwlViTImageProcessor.post_process."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards all arguments to OwlViTImageProcessor.post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards all arguments to OwlViTImageProcessor.post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 316 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj, orient, lines, index, **to_json_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 256 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    def test_use_cache_forward_with_attn_mask(self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
    def test_batch_generation(self):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained(self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing (linear probing) and automatic resizing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind) -> int:
        # Open addressing: step to the next bucket, wrapping around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 117 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    # Greedy selection; assumes the activities are sorted by finish time.
    n = len(finish)
    print('The following activities are selected:')

    # The first activity is always selected
    i = 0
    print(i, end=',')

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
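    # Expected output: "The following activities are selected:" then 0,1,3,4,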
| 316 | 0 |
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
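# Illustrative only: the tokenizer checkpoint and the query text are assumptions
# about how FSNER batches are built, not something the class above enforces.
# It just exercises the contextual-embedding entry point.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
    model = FSNERModel()
    query = tokenizer(["who is Alexander Fleming ?"], return_tensors="pt", padding=True)
    embeddings = model.BERT(**query)
    print(embeddings.shape)  # (batch, seq_len, hidden_size)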
| 239 |
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
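# Cross-check sketch (not in the original): the i-th Gray code is i ^ (i >> 1),
# so the reflect-and-prefix construction above can be verified directly.
def gray_code_bitwise(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]


assert gray_code_bitwise(3) == gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]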
| 316 | 0 |
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Count the square laminae that use no more than `limit` tiles: a lamina with
    outer side `outer_width` and square hole of side `hole_width` (same parity,
    hole_width >= 1) uses outer_width**2 - hole_width**2 tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
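# Brute-force cross-check sketch (assumption: `solution` counts outer/hole
# pairs with outer**2 - hole**2 <= limit, as the docstring above describes).
def solution_bruteforce(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest lamina possible for this outer width
        for hole in range(outer - 2, 0, -2):
            if outer * outer - hole * hole > limit:
                break
            count += 1
        outer += 1
    return count


assert solution_bruteforce(1_000) == solution(1_000)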
| 87 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316 | 0 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class UpperCAmelCase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase :str = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = 8 , **_UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowercase__: int = do_rescale
lowercase__: Tuple = rescale_factor
lowercase__: Dict = do_pad
lowercase__: List[Any] = pad_size
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None ):
lowercase__, lowercase__: Optional[Any] = get_image_size(__UpperCAmelCase )
lowercase__: Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__: str = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=__UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
lowercase__: Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__: Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__: int = do_pad if do_pad is not None else self.do_pad
lowercase__: int = pad_size if pad_size is not None else self.pad_size
lowercase__: Optional[Any] = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase__: int = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
lowercase__: List[Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
lowercase__: Optional[Any] = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
lowercase__: List[str] = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
lowercase__: Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
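# Shape sketch (the input sizes are arbitrary assumptions): with pad_size=8 the
# processor pads each spatial dimension by (old // 8 + 1) * 8 - old, so a
# 17x21 image comes out as 24x24 (it always adds at least one row/column).
if __name__ == "__main__":
    import numpy as np

    processor = Swin2SRImageProcessor()
    image = np.random.randint(0, 256, (3, 17, 21), dtype=np.uint8)
    out = processor.preprocess(image, return_tensors="np")
    print(out["pixel_values"].shape)  # (1, 3, 24, 24)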
| 177 |
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
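# Quick sanity example (not from the original file): 7**4 = 2401 has 4 digits,
# so it is counted for power 4, while 2**4 = 16 (2 digits) is not.
assert len(str(7**4)) == 4
assert len(str(2**4)) != 4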
| 316 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase = '''gptsan-japanese'''
UpperCamelCase = [
'''past_key_values''',
]
UpperCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Dict , __UpperCamelCase :Dict=3_60_00 , __UpperCamelCase :Any=12_80 , __UpperCamelCase :Tuple=10_24 , __UpperCamelCase :Union[str, Any]=81_92 , __UpperCamelCase :Dict=40_96 , __UpperCamelCase :List[str]=1_28 , __UpperCamelCase :List[Any]=10 , __UpperCamelCase :Optional[Any]=0 , __UpperCamelCase :Tuple=16 , __UpperCamelCase :Union[str, Any]=16 , __UpperCamelCase :Optional[Any]=1_28 , __UpperCamelCase :int=0.0 , __UpperCamelCase :Optional[Any]=1e-5 , __UpperCamelCase :Tuple=False , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :List[str]="float32" , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=False , __UpperCamelCase :int=False , __UpperCamelCase :Optional[int]=0.002 , __UpperCamelCase :Optional[Any]=False , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[Any]=3_59_98 , __UpperCamelCase :Dict=3_59_95 , __UpperCamelCase :Optional[Any]=3_59_99 , **__UpperCamelCase :Dict , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = d_ff
A = d_ext
A = d_spout
A = num_switch_layers
A = num_ext_layers
A = num_switch_layers + num_ext_layers
A = num_heads
A = num_experts
A = expert_capacity
A = dropout_rate
A = layer_norm_epsilon
A = router_bias
A = router_jitter_noise
A = router_dtype
A = router_ignore_padding_tokens
A = output_hidden_states
A = output_attentions
A = initializer_factor
A = output_router_logits
A = use_cache
super().__init__(
separator_token_id=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
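# Minimal usage sketch: `attribute_map` lets the common config names resolve to
# the GPTSAN-specific fields, and `num_layers` is derived from the two counts.
if __name__ == "__main__":
    config = GPTSanJapaneseConfig()
    assert config.num_layers == config.num_switch_layers + config.num_ext_layers
    assert config.hidden_size == config.d_model == 1024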
| 292 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--classification_head', action='store_true', help='Whether to convert a final classification head.'
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 216 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product obtainable by multiplying a contiguous
    subarray of the given integer list `numbers`.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
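# Quick checks (illustrative, not part of the original module): a sign flip
# caps [2, 3, -2, 4] at 2*3, a zero resets the running product, and two
# negatives multiply back to a positive.
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-4, -3]) == 12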
| 184 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316 | 0 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowercase_ ( _UpperCAmelCase=None , _UpperCAmelCase=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_UpperCAmelCase )
@dataclass
class lowercase :
__lowerCAmelCase : int = field(
metadata={"""help""": """The csv file to plot."""} , )
__lowerCAmelCase : Union[str, Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
__lowerCAmelCase : List[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
__lowerCAmelCase : int = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
__lowerCAmelCase : Tuple = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
} , )
__lowerCAmelCase : Dict = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
__lowerCAmelCase : Optional[int] = list_field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""})
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
try:
int(_UpperCAmelCase )
return True
except ValueError:
return False
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
try:
float(_UpperCAmelCase )
return True
except ValueError:
return False
class lowercase :
def __init__( self : Optional[Any] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : str = args
A_ : Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
A_ : int = csv.DictReader(__UpperCAmelCase )
for row in reader:
A_ : List[str] = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
A_ : Dict = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
A_ : str = float(row['''result'''] )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ , A_ : Dict = plt.subplots()
A_ : Any = '''Time usage''' if self.args.is_time else '''Memory usage'''
A_ : Optional[int] = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
A_ : List[str] = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
A_ : int = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
A_ : Tuple = self.result_dict[model_name]['''result''']
((A_) , (A_)) : List[str] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
A_ : List[Any] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
A_ : int = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__UpperCAmelCase , )
else:
A_ : Union[str, Any] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((A_) , (A_)) : Any = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
A_ : List[str] = np.asarray(__UpperCAmelCase , __UpperCAmelCase )[: len(__UpperCAmelCase )]
plt.scatter(
__UpperCAmelCase , __UpperCAmelCase , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(__UpperCAmelCase , __UpperCAmelCase , '''--''' )
title_str += F""" {label_model_name} vs."""
A_ : str = title_str[:-4]
A_ : Any = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(__UpperCAmelCase )
plt.xlabel(__UpperCAmelCase )
plt.ylabel(__UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowercase_ ( ):
"""simple docstring"""
A_ : str = HfArgumentParser(_UpperCAmelCase )
A_ : Optional[int] = parser.parse_args_into_dataclasses()[0]
A_ : Tuple = Plot(args=_UpperCAmelCase )
plot.plot()
if __name__ == "__main__":
main()
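# Smoke-test sketch: the file name and column values below are assumptions that
# merely match the csv.DictReader fields used in Plot.__init__ above.
def _demo():
    rows = [
        {"model": "bert-base", "batch_size": 8, "sequence_length": 128, "result": 1521},
        {"model": "bert-base", "batch_size": 8, "sequence_length": 256, "result": 2859},
    ]
    with open("toy_results.csv", "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
        writer.writeheader()
        writer.writerows(rows)

    args = PlotArguments(csv_file="toy_results.csv", figure_png_file="plot.png")
    Plot(args=args).plot()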
| 167 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316 | 0 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCAmelCase : Optional[Any] = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ):
def __init__( self : List[Any] , lowerCAmelCase_ : Tuple = " "):
"""simple docstring"""
lowercase_ = sentence_delimiter
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Any):
"""simple docstring"""
return list(__UpperCAmelCase)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = []
for sent_idx, sentence in enumerate(__UpperCAmelCase):
chars.extend(self.process_string(__UpperCAmelCase))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCAmelCase) - 1:
chars.append(self.sentence_delimiter)
return chars
UpperCAmelCase : Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
UpperCAmelCase : List[Any] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCAmelCase : Any = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCAmelCase : Optional[Any] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
UpperCAmelCase : Dict = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=False):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
__UpperCAmelCase , __UpperCAmelCase , truth_transform=__UpperCAmelCase , hypothesis_transform=__UpperCAmelCase , )["wer"]
lowercase_ = 0
lowercase_ = 0
for prediction, reference in zip(__UpperCAmelCase , __UpperCAmelCase):
lowercase_ = jiwer.compute_measures(
__UpperCAmelCase , __UpperCAmelCase , truth_transform=__UpperCAmelCase , hypothesis_transform=__UpperCAmelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
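A minimal usage sketch, mirroring the example embedded in _KWARGS_DESCRIPTION; it assumes the script above is resolvable by datasets.load_metric under the name "cer".

import datasets

cer = datasets.load_metric("cer")  # assumed to resolve to the script above
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
print(cer.compute(predictions=predictions, references=references))  # ~0.3415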
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
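A minimal construction sketch; the output directory and generation settings are illustrative values, and the resulting object is meant to be passed to a Seq2SeqTrainer.

training_args = Seq2SeqTrainingArguments(
    output_dir="out",            # illustrative path
    predict_with_generate=True,  # evaluate with generate() so ROUGE/BLEU can be computed
    generation_max_length=128,
    generation_num_beams=4,
)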
"""simple docstring"""
class __snake_case :
def __init__( self) -> Union[str, Any]:
'''simple docstring'''
a__: str = 0
a__: int = 0
a__: List[Any] = {}
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
if vertex not in self.adjacency:
a__: List[Any] = {}
self.num_vertices += 1
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
self.add_vertex(__UpperCAmelCase)
self.add_vertex(__UpperCAmelCase)
if head == tail:
return
a__: Any = weight
a__: Optional[int] = weight
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Tuple = self.get_edges()
for edge in edges:
a__ , a__ , a__: List[str] = edge
edges.remove((tail, head, weight))
for i in range(len(__UpperCAmelCase)):
a__: Optional[Any] = list(edges[i])
edges.sort(key=lambda lowercase: e[2])
for i in range(len(__UpperCAmelCase) - 1):
if edges[i][2] >= edges[i + 1][2]:
a__: Any = edges[i][2] + 1
for edge in edges:
a__ , a__ , a__: Union[str, Any] = edge
a__: str = weight
a__: Dict = weight
def __str__( self) -> Any:
'''simple docstring'''
a__: Union[str, Any] = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
a__: Tuple = self.adjacency[head][tail]
string += f'{head} -> {tail} == {weight}\n'
return string.rstrip('\n')
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def lowerCamelCase_ ( lowercase=None , lowercase=None) -> Optional[Any]:
'''simple docstring'''
a__: Tuple = Graph()
if vertices is None:
a__: Union[str, Any] = []
if edges is None:
a__: Any = []
for vertex in vertices:
g.add_vertex(__UpperCAmelCase)
for edge in edges:
g.add_edge(*__UpperCAmelCase)
return g
class __snake_case :
def __init__( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = {}
a__: Optional[Any] = {}
def __len__( self) -> str:
'''simple docstring'''
return len(self.parent)
def lowerCamelCase_ ( self , lowercase) -> Optional[int]:
'''simple docstring'''
if item in self.parent:
return self.find(__UpperCAmelCase)
a__: Optional[Any] = item
a__: str = 0
return item
def lowerCamelCase_ ( self , lowercase) -> Tuple:
'''simple docstring'''
if item not in self.parent:
return self.make_set(__UpperCAmelCase)
if item != self.parent[item]:
a__: Tuple = self.find(self.parent[item])
return self.parent[item]
def lowerCamelCase_ ( self , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = self.find(__UpperCAmelCase)
a__: Dict = self.find(__UpperCAmelCase)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
a__: Union[str, Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
a__: Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
a__: Any = roota
return roota
return None
@staticmethod
def lowerCamelCase_ ( lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: str = graph.num_vertices
a__: List[str] = Graph.UnionFind()
a__: int = []
while num_components > 1:
a__: int = {}
for vertex in graph.get_vertices():
a__: Dict = -1
a__: int = graph.get_edges()
for edge in edges:
a__ , a__ , a__: Dict = edge
edges.remove((tail, head, weight))
for edge in edges:
a__ , a__ , a__: Dict = edge
a__: Tuple = union_find.find(__UpperCAmelCase)
a__: List[str] = union_find.find(__UpperCAmelCase)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
a__: Tuple = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
a__: List[Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
a__ , a__ , a__: Optional[int] = cheap_edge[vertex]
if union_find.find(__UpperCAmelCase) != union_find.find(__UpperCAmelCase):
union_find.union(__UpperCAmelCase , __UpperCAmelCase)
mst_edges.append(cheap_edge[vertex])
a__: int = num_components - 1
a__: str = Graph.build(edges=__UpperCAmelCase)
return mst
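A small end-to-end sketch for the classes above; the edge list is illustrative.

g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 2), (1, 3, 4)])
g.distinct_weight()          # Borůvka assumes distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)                   # prints the MST edges, one per line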
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
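A quick usage sketch exercising insert, lookup, delete, and the MutableMapping interface.

hm = HashMap()
hm["a"] = 1
hm["b"] = 2
assert hm["a"] == 1 and len(hm) == 2
del hm["a"]
print(hm)  # HashMap(b: 2)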
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE):
snake_case__ = ['''image_processor''', '''tokenizer''']
snake_case__ = '''OwlViTImageProcessor'''
snake_case__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : str ) -> Union[str, Any]:
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCAmelCase , )
_UpperCamelCase = kwargs.pop('''feature_extractor''' )
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self : Optional[Any] , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]="max_length" , __UpperCamelCase : Optional[Any]="np" , **__UpperCamelCase : str ) -> Union[str, Any]:
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
_UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
_UpperCamelCase = []
# Maximum number of queries across batch
_UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
_UpperCamelCase = t + [''' '''] * (max_num_queries - len(__UpperCAmelCase ))
_UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
_UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
_UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
_UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
_UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
_UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
_UpperCamelCase = BatchEncoding()
_UpperCamelCase = input_ids
_UpperCamelCase = attention_mask
if query_images is not None:
_UpperCamelCase = BatchEncoding()
_UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
_UpperCamelCase = query_pixel_values
if images is not None:
_UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _UpperCamelCase ( self : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Dict ) -> Any:
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCamelCase ( self : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : str ) -> int:
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCamelCase ( self : int , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : str ) -> List[str]:
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCamelCase ( self : Dict , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCamelCase ( self : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[int] ) -> Union[str, Any]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _UpperCamelCase ( self : Tuple ) -> List[str]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCAmelCase , )
return self.image_processor_class
@property
def _UpperCamelCase ( self : List[str] ) -> Tuple:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCAmelCase , )
return self.image_processor
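A minimal usage sketch; google/owlvit-base-patch32 is the commonly published OWL-ViT checkpoint, though its availability is an assumption of this example, and the image is a synthetic placeholder.

import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # placeholder image
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values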
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
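The check works because, in two's complement, the sign bit of num1 ^ num2 is set exactly when the operands' sign bits differ, which makes the XOR result negative; a few sanity checks:

# Sanity checks for the XOR sign trick (two's-complement assumption).
assert different_signs(-5, 3) is True
assert different_signs(-5, -3) is False
assert different_signs(5, 3) is False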
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """
    Apply the affine transform that maps the points `pt1` onto `pt2` to `img`.
    """
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
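For intuition, cv2.getAffineTransform returns the 2x3 matrix M that maps each source point (x, y, 1) onto its destination; a quick verification sketch using the first point set above:

# Verify that the computed affine matrix maps each source point onto its target.
src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
M = cv2.getAffineTransform(src, dst)
for (x, y), d in zip(src, dst):
    assert np.allclose(M @ np.array([x, y, 1.0]), d, atol=1e-4)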
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
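A hedged usage sketch; the checkpoint id is assumed to be one of the published NCSN++ (SDE-VE) models on the Hub, so verify availability before relying on it.

from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")  # assumed checkpoint id
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")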