# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond (pyramid)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, i.e. the upper half followed by the lower half."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
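# For reference, a sketch of the expected output of the fixed functions above,
# e.g. pretty_print(3) (trailing spaces omitted):
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *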
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
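# Not part of the original test file: a minimal, hedged inference sketch against the same
# public checkpoint the integration tests above use ("cat.png" is a placeholder path):
#
#   from PIL import Image
#   from transformers import MobileViTForImageClassification, MobileViTImageProcessor
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
#   model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted_class])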
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used only for testing purposes"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
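# Not part of the original test file: a minimal sketch of the 4-bit loading path these
# tests exercise (assumes a CUDA GPU plus the bitsandbytes/accelerate extras; the
# checkpoint name is just an example):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-560m", quantization_config=quantization_config, device_map="auto"
#   )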
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return a (numerator, denominator) pair equivalent to the given decimal."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction using the Euclidean algorithm for the GCD
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
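# Example usage of ohms_law above: exactly one of the three arguments must be 0, and the
# function solves for that quantity from V = I * R, e.g.:
#   ohms_law(voltage=10, current=0, resistance=5)  # -> {"current": 2.0}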
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
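# The crux of preprocess_function and DataCollatorForMultipleChoice above is flattening
# the 4 answer choices per example before tokenization/padding and regrouping afterwards.
# A toy illustration of that indexing (not part of the original script):
#
#   flat = ["a", "b", "c", "d", "e", "f", "g", "h"]  # 2 examples x 4 choices, flattened
#   grouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
#   assert grouped == [["a", "b", "c", "d"], ["e", "f", "g", "h"]]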
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Return pi to `precision` digits, computed with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
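# The function above implements the Chudnovsky series; each term contributes roughly
# 14 digits, hence the ceil(precision / 14) iteration count:
#
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#                          / ((3k)! (k!)^3 * 640320^(3k + 3/2))
#
# where 640320^3 = 262537412640768000 and 640320^(3/2) / 12 = 426880 * sqrt(10005).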
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target-language code."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
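# Not part of the original module: a minimal round-trip sketch for this tokenizer
# (requires the sentencepiece dependency and network access to the hub):
#
#   from transformers import Speech2TextTokenizer
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   print(tokenizer.decode(ids, skip_special_tokens=True))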
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):  # alternate between even and odd pairs
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
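# Odd-even transposition sort alternates compare-and-swap passes over even- and
# odd-indexed pairs; n passes suffice for n elements (O(n^2) work sequentially, though
# each pass is embarrassingly parallel, which is why it is usually presented as a
# parallel sorting algorithm). Quick sanity check (a hypothetical extra assertion,
# not in the original):
#   assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]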
import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )
def lowercase_ ( self , _UpperCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = DebertaVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0]
__snake_case : List[str] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0]
__snake_case : str = model(_UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = DebertaVaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : Any = DebertaVaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DebertaVaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = DebertaVaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = DebertaVaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ,
) : Optional[int] = config_and_inputs
__snake_case : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = DebertaVaModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[Any] = DebertaVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def lowercase_ ( self ):
pass
@slow
def lowercase_ ( self ):
__snake_case : Optional[Any] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__snake_case : Optional[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__snake_case : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
# compare the actual values for a slice.
__snake_case : Dict = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 706 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = 4_2
__UpperCAmelCase = 4_2
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 707 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
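# Minimal sketch of the normalisation done in the method above, replicated in plain
# Python (example dict assumed for illustration): one language may map to several
# translations, and the output is flattened and sorted by language code.
def _flatten_translation_dict(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))  # ascending by language code
    return {"language": list(languages), "translation": list(translations)}


# _flatten_translation_dict({"fr": "le chat", "en": ["the cat", "the small cat"]})
# -> {"language": ["en", "en", "fr"], "translation": ["the cat", "the small cat", "le chat"]}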
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[int] ):
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ):
# Base Case
if curr_ind == len(__UpperCAmelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCAmelCase ) ):
if valid_connection(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
# Insert current vertex into path as next transition
__snake_case : Optional[int] = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCAmelCase , __UpperCAmelCase , curr_ind + 1 ):
return True
# Backtrack
__snake_case : Dict = -1
return False
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : int = 0 ):
__snake_case : int = [-1] * (len(__UpperCAmelCase ) + 1)
# initialize start and end of path with starting index
__snake_case : Union[str, Any] = start_index
# run the search: return the cycle path if one is found, otherwise an empty list
return path if util_hamilton_cycle(__UpperCAmelCase , __UpperCAmelCase , 1 ) else []
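# Usage sketch (function names as in the original source; the three defs above were
# all renamed to one identifier, so this is illustrative rather than runnable as-is):
#
#     graph = [
#         [0, 1, 0, 1, 0],
#         [1, 0, 1, 1, 1],
#         [0, 1, 0, 0, 1],
#         [1, 1, 0, 0, 1],
#         [0, 1, 1, 1, 0],
#     ]
#     hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0], visiting every vertex exactly once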
| 708 |
from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # f = g + h: cost so far plus estimated cost to the goal
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # expand the least costly cell so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
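# Note on the loop above: the open list holds plain [f, g, x, y] lists, and the
# sort() / reverse() / pop() sequence always expands the cell with the smallest
# f = g + heuristic, i.e. best-first expansion without a heap.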
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 0 |
'''simple docstring'''
def UpperCAmelCase__( ):
__snake_case : int = []
__snake_case : str = 1
while len(__UpperCAmelCase ) < 1E6:
constant.append(str(__UpperCAmelCase ) )
i += 1
__snake_case : str = ''.join(__UpperCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
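# An O(log n) alternative sketch (helper name hypothetical, not part of the original
# solution): index the n-th digit of the constant directly instead of materialising
# the million-character string.
def champernowne_digit(n: int) -> int:
    digits, count, start = 1, 9, 1
    while n > digits * count:  # skip the block of all `digits`-digit numbers
        n -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])


# champernowne_digit(12) -> 1 (the leading digit of 11 in ...9, 10, 11, 12...)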
| 709 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
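# Hypothetical usage sketch: the public class names below are assumed from the
# original InstructBlip source, since the definitions above were renamed during
# preprocessing, so treat this as illustrative only:
#
#     vision = InstructBlipVisionConfig()
#     qformer = InstructBlipQFormerConfig(encoder_hidden_size=vision.hidden_size)
#     text = CONFIG_MAPPING["opt"]()
#     config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)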
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__magic_name__ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 710 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 0 |
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self ):
__snake_case : List[Any] = [2, 1, 2, -1]
__snake_case : List[Any] = [1, 2, 3, 4]
def lowercase_ ( self ):
__snake_case : str = len(self.first_signal )
__snake_case : int = len(self.second_signal )
__snake_case : Dict = max(_UpperCAmelCase , _UpperCAmelCase )
# create a zero matrix of max_length x max_length
__snake_case : Dict = [[0] * max_length for i in range(_UpperCAmelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_UpperCAmelCase ):
__snake_case : List[str] = deque(self.second_signal )
rotated_signal.rotate(_UpperCAmelCase )
for j, item in enumerate(_UpperCAmelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
__snake_case : List[Any] = np.matmul(np.transpose(_UpperCAmelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_UpperCAmelCase , 2 ) for i in final_signal]
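# Cross-check sketch via the DFT identity circular_conv(a, b) == ifft(fft(a) * fft(b)),
# using the same example signals as above; expected output: [10.0, 10.0, 6.0, 14.0],
# matching the rotation-matrix computation in the class.
def _fft_circular_convolution():
    first = np.array([2, 1, 2, -1], dtype=float)
    second = np.array([1, 2, 3, 4], dtype=float)
    out = np.real(np.fft.ifft(np.fft.fft(first) * np.fft.fft(second)))
    return [round(float(x), 2) for x in out]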
if __name__ == "__main__":
doctest.testmod()
| 711 |
import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "donut-swin"
__UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=4 , _UpperCAmelCase=3 , _UpperCAmelCase=96 , _UpperCAmelCase=[2, 2, 6, 2] , _UpperCAmelCase=[3, 6, 12, 24] , _UpperCAmelCase=7 , _UpperCAmelCase=4.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : List[Any] = image_size
__snake_case : Optional[Any] = patch_size
__snake_case : Tuple = num_channels
__snake_case : Dict = embed_dim
__snake_case : Tuple = depths
__snake_case : int = len(_UpperCAmelCase )
__snake_case : List[Any] = num_heads
__snake_case : Optional[Any] = window_size
__snake_case : Dict = mlp_ratio
__snake_case : Optional[Any] = qkv_bias
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : int = drop_path_rate
__snake_case : Tuple = hidden_act
__snake_case : Optional[int] = use_absolute_embeddings
__snake_case : Tuple = layer_norm_eps
__snake_case : int = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case : Any = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
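# Worked check with the defaults above: embed_dim=96 and depths=[2, 2, 6, 2] (four
# stages) give hidden_size = 96 * 2 ** (4 - 1) = 768, the channel dimension after the
# last Swin stage.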
| 712 |
from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
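# Spot check (the predicate's original name `is_substring_divisible` is assumed; both
# defs above were renamed to one identifier): the known solution member 1_406_357_289
# passes every test, e.g. d4=6 is even, d3+d4+d5 = 0+6+3 is divisible by 3, d6=5 is
# divisible by 5, and d5 d6 d7 = 357 = 7 * 51.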
| 679 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = (EulerDiscreteScheduler,)
__UpperCAmelCase = 1_0
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : Optional[int] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase )
return config
def lowercase_ ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowercase_ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowercase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.scheduler_classes[0]
__snake_case : Union[str, Any] = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__snake_case : Optional[Any] = torch.manual_seed(0 )
__snake_case : Tuple = self.dummy_model()
__snake_case : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
__snake_case : Optional[int] = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__snake_case : Any = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
__snake_case : int = output.prev_sample
__snake_case : List[str] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def lowercase_ ( self ):
__snake_case : int = self.scheduler_classes[0]
__snake_case : List[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
__snake_case : Optional[Any] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__snake_case : List[Any] = torch.manual_seed(0 )
__snake_case : str = self.dummy_model()
__snake_case : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__snake_case : Any = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__snake_case : Union[str, Any] = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
__snake_case : str = output.prev_sample
__snake_case : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Any = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Tuple = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
__snake_case : List[str] = torch.manual_seed(0 )
__snake_case : Dict = self.dummy_model()
__snake_case : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__snake_case : Any = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
__snake_case : str = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[Any] = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : int = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
__snake_case : List[Any] = output.prev_sample
__snake_case : str = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Dict = self.scheduler_classes[0]
__snake_case : Tuple = self.get_scheduler_config()
__snake_case : str = scheduler_class(**_UpperCAmelCase , use_karras_sigmas=_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
__snake_case : str = torch.manual_seed(0 )
__snake_case : List[str] = self.dummy_model()
__snake_case : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__snake_case : Optional[int] = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
__snake_case : List[Any] = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Tuple = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
__snake_case : Tuple = output.prev_sample
__snake_case : Dict = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
| 713 |
# Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
__magic_name__ = '''Usage of script: script_name <size_of_canvas:int>'''
__magic_name__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Tuple = [[False for i in range(__UpperCAmelCase )] for j in range(__UpperCAmelCase )]
return canvas
def UpperCAmelCase__( __UpperCAmelCase : list[list[bool]] ):
for i, row in enumerate(__UpperCAmelCase ):
for j, _ in enumerate(__UpperCAmelCase ):
__snake_case : int = bool(random.getrandbits(1 ) )
def UpperCAmelCase__( __UpperCAmelCase : list[list[bool]] ):
__snake_case : Any = np.array(__UpperCAmelCase )
__snake_case : Any = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__UpperCAmelCase ):
for c, pt in enumerate(__UpperCAmelCase ):
__snake_case : Any = __judge_point(
__UpperCAmelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : Any = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def UpperCAmelCase__( __UpperCAmelCase : bool , __UpperCAmelCase : list[list[bool]] ):
__snake_case : Optional[Any] = 0
__snake_case : List[Any] = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__snake_case : List[Any] = pt
if pt:
if alive < 2:
__snake_case : List[Any] = False
elif alive == 2 or alive == 3:
__snake_case : List[str] = True
elif alive > 3:
__snake_case : int = False
else:
if alive == 3:
__snake_case : Dict = True
return state
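# Rule summary (Conway's Game of Life), matching the branches above: a live cell
# survives with exactly 2 or 3 live neighbours and dies otherwise; a dead cell comes
# alive with exactly 3 live neighbours.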
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__magic_name__ = int(sys.argv[1])
# main working structure of this module.
__magic_name__ = create_canvas(canvas_size)
seed(c)
__magic_name__ , __magic_name__ = plt.subplots()
fig.show()
__magic_name__ = ListedColormap(['''w''', '''k'''])
try:
while True:
__magic_name__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 714 |
from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
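# Cross-check sketch (the helper name below is hypothetical): bin(n).count("1") is the
# portable built-in popcount, and int.bit_count() (Python 3.10+) is the native one;
# both should agree with the two implementations above.
def get_set_bits_count_using_builtin(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    return bin(number).count('1' )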
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 10_00 ):
__snake_case : List[Any] = 1
__snake_case : Any = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__snake_case : list[int] = []
__snake_case : List[Any] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__snake_case : Optional[int] = len(__UpperCAmelCase )
__snake_case : List[str] = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__snake_case : Union[str, Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
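# Spot check (solver name `solution` per its original source; the def above was
# renamed): among denominators below 10, 1/7 has the longest recurring cycle
# (142857, length 6), so solution(1, 10) == 7.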
| 715 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
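# Usage sketch: with RUN_SLOW=yes (or 1/true) in the environment, the lookup below
# returns True; unset, it falls back to `default`; any value strtobool cannot parse
# raises ValueError("If set, RUN_SLOW must be yes or no.").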
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
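# Illustrative usage sketch (factory and decorator names are assumptions): the
# class decorator above applies every given decorator to each test method.
#   @for_all_test_methods(require_torch, slow)
#   class MyTests(unittest.TestCase):
#       def test_something(self): ...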
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
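# Illustrative usage sketch (assumes the context manager above is exported as
# `offline`, as in the real `datasets` test utils):
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       with pytest.raises(requests.ConnectionError):
#           requests.Session().request("GET", "https://huggingface.co")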
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually ran and produced some output, in case
    # the test relies on the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
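# Worked example (illustrative): under pytest-xdist, worker "gw3" parses to
# id 3 in the function above, giving port 29500 + 3 = 29503; outside xdist the
# env var defaults to "gw0", so the derived port is 29500.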
| 679 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="shi-labs/oneformer_demo" ):
with open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) , 'r' ) as f:
__snake_case : Optional[int] = json.load(__UpperCAmelCase )
__snake_case : Tuple = {}
__snake_case : List[str] = []
__snake_case : Optional[Any] = []
for key, info in class_info.items():
__snake_case : Any = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(__UpperCAmelCase ) )
__snake_case : Union[str, Any] = thing_ids
__snake_case : Union[str, Any] = class_names
return metadata
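# Illustrative shape of the class-info JSON consumed above (assumed layout,
# matching the keys accessed: "name" and "isthing"):
#   {"0": {"name": "wall", "isthing": 0}, "7": {"name": "bed", "isthing": 1}}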
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=10 , _UpperCAmelCase=False , _UpperCAmelCase=255 , _UpperCAmelCase="shi-labs/oneformer_demo" , _UpperCAmelCase="ade20k_panoptic.json" , _UpperCAmelCase=10 , ):
__snake_case : Dict = parent
__snake_case : List[str] = batch_size
__snake_case : Any = num_channels
__snake_case : Any = min_resolution
__snake_case : str = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Tuple = {'shortest_edge': 32, 'longest_edge': 1_333} if size is None else size
__snake_case : Dict = do_normalize
__snake_case : Any = image_mean
__snake_case : Any = image_std
__snake_case : Dict = class_info_file
__snake_case : Any = prepare_metadata(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = num_text
__snake_case : Optional[Any] = repo_path
# for the post_process_functions
__snake_case : Optional[Any] = 2
__snake_case : List[str] = 10
__snake_case : int = 10
__snake_case : List[str] = 3
__snake_case : int = 4
__snake_case : str = num_labels
__snake_case : Optional[Any] = do_reduce_labels
__snake_case : Optional[int] = ignore_index
def lowercase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if not batched:
__snake_case : Any = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__snake_case : Optional[int] = image.size
else:
__snake_case : List[Any] = image.shape[1], image.shape[2]
if w < h:
__snake_case : List[Any] = int(self.size['shortest_edge'] * h / w )
__snake_case : Optional[int] = self.size['shortest_edge']
elif w > h:
__snake_case : Union[str, Any] = self.size['shortest_edge']
__snake_case : Tuple = int(self.size['shortest_edge'] * w / h )
else:
__snake_case : List[Any] = self.size['shortest_edge']
__snake_case : int = self.size['shortest_edge']
else:
__snake_case : Optional[int] = []
for image in image_inputs:
__snake_case : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__snake_case : Optional[Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
__snake_case : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
def lowercase_ ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__UpperCAmelCase = image_processing_class
def lowercase_ ( self ):
__snake_case : Optional[Any] = OneFormerImageProcessorTester(self )
@property
def lowercase_ ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'ignore_index' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'class_info_file' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'num_text' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'repo_path' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'metadata' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_reduce_labels' ) )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# Initialize image_processor
__snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__snake_case : Dict = self.image_processing_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : str = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__snake_case : Dict = image_processor(
_UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
# Initialize image_processor
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__snake_case : Dict = self.image_processing_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : int = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__snake_case : Optional[Any] = image_processor(
_UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
# Initialize image_processor
__snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__snake_case : Tuple = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__snake_case : Optional[int] = self.image_processing_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__snake_case : List[str] = image_processor(
_UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="np" ):
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__snake_case : Union[str, Any] = self.image_processing_tester.num_labels
__snake_case : str = None
__snake_case : Tuple = None
__snake_case : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase )
if with_segmentation_maps:
__snake_case : List[str] = num_labels
if is_instance_map:
__snake_case : str = list(range(_UpperCAmelCase ) ) * 2
__snake_case : Union[str, Any] = dict(enumerate(_UpperCAmelCase ) )
__snake_case : str = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__snake_case : Optional[Any] = [Image.fromarray(_UpperCAmelCase ) for annotation in annotations]
__snake_case : Dict = image_processor(
_UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , _UpperCAmelCase , return_tensors='pt' , instance_id_to_semantic_id=_UpperCAmelCase , pad_and_return_pixel_mask=_UpperCAmelCase , )
return inputs
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
def common(_UpperCAmelCase=False , _UpperCAmelCase=None ):
__snake_case : str = self.comm_get_image_processor_inputs(
with_segmentation_maps=_UpperCAmelCase , is_instance_map=_UpperCAmelCase , segmentation_type=_UpperCAmelCase )
__snake_case : Union[str, Any] = inputs['mask_labels']
__snake_case : Any = inputs['class_labels']
__snake_case : Optional[int] = inputs['pixel_values']
__snake_case : str = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
            # this ensures that padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_UpperCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_UpperCAmelCase )
common(is_instance_map=_UpperCAmelCase , segmentation_type='pil' )
common(is_instance_map=_UpperCAmelCase , segmentation_type='pil' )
def lowercase_ ( self ):
__snake_case : List[Any] = np.zeros((20, 50) )
__snake_case : Any = 1
__snake_case : Optional[int] = 1
__snake_case : Dict = 1
__snake_case : Union[str, Any] = binary_mask_to_rle(_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
__snake_case : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
        __snake_case : str = feature_extractor.post_process_semantic_segmentation(_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__snake_case : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __snake_case : Any = feature_extractor.post_process_semantic_segmentation(_UpperCAmelCase , target_sizes=_UpperCAmelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
__snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
__snake_case : List[str] = image_processor.post_process_instance_segmentation(_UpperCAmelCase , threshold=0 )
self.assertTrue(len(_UpperCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , _UpperCAmelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
__snake_case : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
__snake_case : List[str] = image_processor.post_process_panoptic_segmentation(_UpperCAmelCase , threshold=0 )
self.assertTrue(len(_UpperCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , _UpperCAmelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
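# Minimal standalone sketch (descriptive names, illustrative only) of the
# linked-list stack above: push prepends a node, pop removes the head node.
class _SketchStack:
    def __init__(self):
        self._top = None  # each node is a (data, next) pair
    def push(self, data):
        self._top = (data, self._top)
    def pop(self):
        if self._top is None:
            raise IndexError("pop from empty stack")
        data, self._top = self._top
        return data
_s = _SketchStack()
_s.push(1)
_s.push(2)
assert _s.pop() == 2 and _s.pop() == 1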
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mobilenet_v2"
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=224 , _UpperCAmelCase=1.0 , _UpperCAmelCase=8 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=32 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu6" , _UpperCAmelCase=True , _UpperCAmelCase=0.8 , _UpperCAmelCase=0.02 , _UpperCAmelCase=0.001 , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
__snake_case : Optional[Any] = num_channels
__snake_case : str = image_size
__snake_case : List[Any] = depth_multiplier
__snake_case : Any = depth_divisible_by
__snake_case : Optional[int] = min_depth
__snake_case : Union[str, Any] = expand_ratio
__snake_case : Optional[int] = output_stride
__snake_case : Optional[int] = first_layer_is_expansion
__snake_case : int = finegrained_output
__snake_case : Any = hidden_act
__snake_case : Tuple = tf_padding
__snake_case : Optional[Any] = classifier_dropout_prob
__snake_case : List[str] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = semantic_loss_ignore_index
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = version.parse("1.11")
@property
def lowercase_ ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def lowercase_ ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def lowercase_ ( self ):
return 1E-4
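# Illustrative usage (assumes the config class is exported from transformers as
# MobileNetV2Config):
#   from transformers import MobileNetV2Config
#   config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)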
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
from __future__ import annotations
import pandas as pd
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ) -> str:
__snake_case : Dict = [0] * no_of_processes
__snake_case : Any = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__UpperCAmelCase ):
__snake_case : Dict = burst_time[i]
__snake_case : int = 0
__snake_case : Tuple = 0
__snake_case : Tuple = 9_99_99_99_99
__snake_case : List[str] = 0
__snake_case : Tuple = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__UpperCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__snake_case : List[str] = remaining_time[j]
__snake_case : str = j
__snake_case : List[str] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__snake_case : Tuple = remaining_time[short]
if minm == 0:
__snake_case : Union[str, Any] = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
__snake_case : Tuple = False
# Find finish time of current process
__snake_case : List[Any] = increment_time + 1
# Calculate waiting time
__snake_case : str = finish_time - arrival_time[short]
__snake_case : List[Any] = finar - burst_time[short]
if waiting_time[short] < 0:
__snake_case : Any = 0
# Increment time
increment_time += 1
return waiting_time
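# Worked example (illustrative): two processes with arrival [0, 1] and burst
# [3, 1].  P1 runs at t=0; at t=1 P2 arrives with remaining time 1 < 2 and
# preempts, finishing at t=2 (waiting time 2 - 1 - 1 = 0).  P1 then resumes
# and finishes at t=4 (waiting time 4 - 0 - 3 = 1), so the result is [1, 0].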
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[int] ) -> Any:
__snake_case : Optional[Any] = [0] * no_of_processes
for i in range(__UpperCAmelCase ):
__snake_case : Optional[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ) -> Optional[int]:
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
for i in range(__UpperCAmelCase ):
__snake_case : Dict = total_waiting_time + waiting_time[i]
__snake_case : Any = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print(F"""Average turn around time = {total_turn_around_time / no_of_processes:.5f}""" )
if __name__ == "__main__":
    print('''Enter how many processes you want to analyze''')
__magic_name__ = int(input())
__magic_name__ = [0] * no_of_processes
__magic_name__ = [0] * no_of_processes
__magic_name__ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
    print('''Enter the arrival time and burst time for process ''' + str(i + 1))
__magic_name__ , __magic_name__ = map(int, input().split())
__magic_name__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__magic_name__ = burst_time
__magic_name__ = no_of_processes
__magic_name__ = waiting_time
__magic_name__ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__magic_name__ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
    # Printing the DataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
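# Example invocation (illustrative paths; the script name is an assumption):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch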
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "openai/whisper-base"
__UpperCAmelCase = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCAmelCase = "transcriber"
__UpperCAmelCase = WhisperProcessor
__UpperCAmelCase = WhisperForConditionalGeneration
__UpperCAmelCase = ["audio"]
__UpperCAmelCase = ["text"]
def lowercase_ ( self , _UpperCAmelCase ):
return self.pre_processor(_UpperCAmelCase , return_tensors='pt' ).input_features
def lowercase_ ( self , _UpperCAmelCase ):
return self.model.generate(inputs=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0]
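# Illustrative usage sketch (class name and input type are assumptions): the
# tool wires encode -> forward -> decode, so calling it end to end looks like
#   tool = SpeechToTextTool()
#   text = tool(audio)  # `audio` is a raw waveform accepted by the processor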
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
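# Pickling note (editorial sketch): SentencePieceProcessor objects are not
# picklable, so __getstate__ drops sp_model and __setstate__ reloads it from
# self.spm_file; e.g. pickle.loads(pickle.dumps(tokenizer)) rebuilds it from disk.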
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_dir} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
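if __name__ == "__main__":
# Editor's sketch (not part of the original module): round-trip the two JSON
# helpers defined above.
save_json({'<s>': 0, '</s>': 1} , 'vocab_demo.json' )
assert load_json('vocab_demo.json' ) == {'<s>': 0, '</s>': 1}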
| 720 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
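# Examples (editorial): is_isogram('Uncopyrightable') -> True (no repeated
# letters); is_isogram('allowed') -> False (the letter 'l' repeats).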
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__magic_name__ = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
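# Editorial note (sketch): sys.modules[__name__] is replaced by a _LazyModule,
# so attributes such as ASTModel trigger the heavy torch-backed submodule
# import only on first access, keeping the top-level import cheap.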
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PegasusTokenizer
__UpperCAmelCase = PegasusTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : Union[str, Any] = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def lowercase_ ( self , **_UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return ("This is a test", "This is a test")
def lowercase_ ( self ):
__snake_case : Optional[Any] = '</s>'
__snake_case : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(_UpperCAmelCase ) , 1_103 )
def lowercase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__snake_case : str = self.tokenizer_class.from_pretrained(self.tmpdirname )
__snake_case : int = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
__snake_case : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
__snake_case : Tuple = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__snake_case : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
__snake_case : int = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
__snake_case : Tuple = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__snake_case : List[Any] = 'To ensure a smooth flow of bank resolutions.'
__snake_case : Dict = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__snake_case : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
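# Offset arithmetic (editorial): per the asserts above, sentencepiece piece ids
# are shifted by offset = 103, so the spm unk piece (id 2) surfaces as
# unk_token_id = 2 + 103 = 105, while ids 0/1 remain <pad>/</s>.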
@require_torch
def lowercase_ ( self ):
__snake_case : str = ['This is going to be way too long.' * 150, 'short example']
__snake_case : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
__snake_case : int = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' )
__snake_case : str = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowercase_ ( self ):
# fmt: off
__snake_case : Optional[int] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PegasusTokenizer
__UpperCAmelCase = PegasusTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : Tuple = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def lowercase_ ( self , **_UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return ("This is a test", "This is a test")
def lowercase_ ( self ):
__snake_case : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__snake_case : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__snake_case : Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
__snake_case : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
__snake_case : List[str] = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def lowercase_ ( self ):
__snake_case : Union[str, Any] = ['This is going to be way too long.' * 1_000, 'short example']
__snake_case : List[str] = ['not super long but more than 5 tokens', 'tiny']
__snake_case : int = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' )
__snake_case : int = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def lowercase_ ( self ):
__snake_case : Union[str, Any] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
__snake_case : str = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
__snake_case : int = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : List[Any] = seq_length
__snake_case : Optional[Any] = is_training
__snake_case : Optional[Any] = use_attention_mask
__snake_case : Tuple = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : Optional[Any] = vocab_size
__snake_case : int = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : List[Any] = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Dict = initializer_range
__snake_case : Optional[int] = num_choices
def lowercase_ ( self ):
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_attention_mask:
__snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[int] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self ):
__snake_case : List[str] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs
__snake_case : Optional[int] = True
__snake_case : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = True
__UpperCAmelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self ):
__snake_case : List[str] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase_ ( self ):
for model_class_name in self.all_model_classes:
__snake_case : List[Any] = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase )
__snake_case : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase_ ( self ):
__snake_case : str = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase )
__snake_case : Dict = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__snake_case : Any = model(_UpperCAmelCase )[0]
__snake_case : str = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , _UpperCAmelCase )
# compare the actual values for a slice.
__snake_case : str = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase )
__snake_case : Optional[Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__snake_case : Optional[Any] = model(_UpperCAmelCase )[0]
# compare the actual values for a slice.
__snake_case : List[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
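# Worked example (editorial): with the tester's image_size=32 the five hidden
# states have spatial sizes 16, 8, 4, 2 and 1; the loop leaves divisor = 64,
# and 64 // 2 == 32 matches the configured output_stride.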
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 0 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = LayoutLMTokenizer
__UpperCAmelCase = LayoutLMTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
__snake_case : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self , **_UpperCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[str] = 'UNwant\u00E9d,running'
__snake_case : Tuple = 'unwanted, running'
return input_text, output_text
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.tokenizer_class(self.vocab_file )
__snake_case : Union[str, Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def lowercase_ ( self ):
pass
| 702 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
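# Worked trace (editorial): decimal_to_fraction(6.25) -> 2 fractional digits,
# numerator = 625, denominator = 100; Euclid's loop yields divisor (gcd) = 25,
# so the reduced fraction returned is (25, 4).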
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=100 , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=[0, 1, 2, 3] , ):
__snake_case : List[str] = parent
__snake_case : str = 100
__snake_case : List[Any] = batch_size
__snake_case : Dict = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Dict = num_channels
__snake_case : Tuple = is_training
__snake_case : List[Any] = use_labels
__snake_case : Union[str, Any] = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : List[str] = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : List[Any] = scope
__snake_case : str = out_indices
__snake_case : Any = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case : Tuple = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = num_patches + 1
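# e.g. with the tester defaults above (image_size=30, patch_size=2):
# num_patches = (30 // 2) ** 2 = 225, so seq_length = 225 + 1 = 226.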
def lowercase_ ( self ):
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = None
__snake_case : List[str] = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = BeitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Dict = BeitForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.type_sequence_label_size
__snake_case : Union[str, Any] = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : List[Any] = 1
__snake_case : Optional[Any] = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[int] = self.num_labels
__snake_case : Dict = BeitForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__snake_case : int = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def lowercase_ ( self ):
__snake_case : str = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : str = config_and_inputs
__snake_case : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Union[str, Any] = BeitModelTester(self )
__snake_case : str = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def lowercase_ ( self ):
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def lowercase_ ( self ):
if not self.model_tester.is_training:
return
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__snake_case : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__snake_case : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__snake_case : str = model(**_UpperCAmelCase ).loss
loss.backward()
def lowercase_ ( self ):
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__snake_case : Dict = False
__snake_case : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__snake_case : Optional[Any] = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
__snake_case : List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__snake_case : Union[str, Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def lowercase_ ( self ):
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
__snake_case : int = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def lowercase_ ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = BeitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Optional[int] = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_UpperCAmelCase )
__snake_case : Any = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# prepare bool_masked_pos
__snake_case : List[str] = torch.ones((1, 196) , dtype=torch.bool ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Any = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase )
__snake_case : Any = outputs.logits
# verify the logits
__snake_case : int = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : List[Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1E-2 ) )
@slow
def lowercase_ ( self ):
__snake_case : Union[str, Any] = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_UpperCAmelCase )
__snake_case : List[Any] = self.default_image_processor
__snake_case : Tuple = prepare_img()
__snake_case : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : Dict = outputs.logits
# verify the logits
__snake_case : Dict = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Dict = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
__snake_case : Dict = 281
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def lowercase_ ( self ):
__snake_case : Tuple = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
_UpperCAmelCase )
__snake_case : List[str] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : Dict = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : List[str] = model(**_UpperCAmelCase )
__snake_case : Any = outputs.logits
# verify the logits
__snake_case : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Union[str, Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
__snake_case : Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def lowercase_ ( self ):
__snake_case : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__snake_case : List[str] = model.to(_UpperCAmelCase )
__snake_case : List[Any] = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
__snake_case : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__snake_case : List[Any] = Image.open(ds[0]['file'] )
__snake_case : str = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : Tuple = outputs.logits
# verify the logits
__snake_case : Any = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : List[str] = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__snake_case : Any = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_UpperCAmelCase , )
else:
__snake_case : str = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__snake_case : Tuple = model.to(_UpperCAmelCase )
__snake_case : List[Any] = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
__snake_case : int = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__snake_case : Optional[int] = Image.open(ds[0]['file'] )
__snake_case : int = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**_UpperCAmelCase )
__snake_case : Optional[Any] = outputs.logits.detach().cpu()
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
__snake_case : int = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : Union[str, Any] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
        __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.int64 )
return batch
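# Shape walk-through of the collator above (a hedged, self-contained sketch; the toy
# sizes are illustrative): a batch of 2 examples with 4 answer candidates each is
# flattened to 8 sequences, padded to a common length by `tokenizer.pad`, and then
# restored to the (batch_size, num_choices, seq_len) layout multiple-choice heads expect.
_flat_example = torch.zeros(2 * 4 , 16 )           # 8 padded sequences of length 16
_unflat_example = _flat_example.view(2 , 4 , -1 )  # (batch_size, num_choices, seq_len)
assert _unflat_example.shape == (2, 4, 16)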
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
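# Example invocation (illustrative — the model name and hyperparameters below are
# assumptions; the flags themselves are standard HfArgumentParser/TrainingArguments
# options exposed by this script):
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir /tmp/swag_output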
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__magic_name__ = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
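# The same import-guard pattern extends to any optional dependency; a hedged sketch for
# the `datasets` library (assumed to expose `__version__` like the packages above):
try:
    import datasets

    print('''datasets version:''', datasets.__version__)
except ImportError:
    print('''datasets version:''', None)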
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
        assert save_dir.is_dir(), F"""{save_dir} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
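# Minimal usage sketch (hedged; running it downloads the vocab/spm files for the
# checkpoint referenced in the URL maps above):
#
#   from transformers import Speech2TextTokenizer
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids        # ends with the EOS id (see the prefix/EOS logic above)
#   text = tok.decode(ids, skip_special_tokens=True)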
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = 100 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , ):
if audio_length_in_s is None:
__snake_case : str = self.unet.config.sample_size / self.unet.config.sample_rate
__snake_case : Any = audio_length_in_s * self.unet.config.sample_rate
__snake_case : Union[str, Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
__snake_case : Dict = int(_UpperCAmelCase )
if sample_size % down_scale_factor != 0:
__snake_case : Optional[int] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
' process.' )
__snake_case : Union[str, Any] = int(_UpperCAmelCase )
__snake_case : Tuple = next(iter(self.unet.parameters() ) ).dtype
__snake_case : str = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__snake_case : Optional[int] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase , device=audio.device )
__snake_case : List[str] = self.scheduler.timesteps.to(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__snake_case : Dict = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
            # 2. compute previous sample: x_t -> x_t-1
__snake_case : Any = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
__snake_case : List[str] = audio.clamp(-1 , 1 ).float().cpu().numpy()
__snake_case : List[Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
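# Usage sketch (hedged): this pipeline mirrors diffusers' DanceDiffusionPipeline, an
# unconditional audio generator; the checkpoint name below is an assumption.
#
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   out = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#   waveform = out.audios[0]    # numpy array of shape (channels, samples)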
def odd_even_transposition(arr: list ):
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
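    # Sanity check (hedged sketch): the result should agree with the built-in sort on
    # random input as well.
    import random

    sample = [random.randint(0, 100) for _ in range(25)]
    assert odd_even_transposition(list(sample)) == sorted(sample)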
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test ):
    _id = F"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , 'r' ) as f:
        lines = f.readlines()
    class_regex = F"""class {class_name}("""
    test_regex = F"""{4 * " "}def {test_name}("""
    line_begin_regex = F"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = F"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , 'w' ) as f:
        for line in new_lines:
            f.write(line )
def main(correct , fail=None ):
    if fail is not None:
        with open(fail , 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__magic_name__ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
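# Input format sketch (hedged, inferred from the `line.split(';')` unpacking above): each
# line of --correct_filename carries four semicolon-separated fields, e.g.
#
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])
#
# while --fail_filename, when given, lists failing tests as file::class::test, one per line.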
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
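# The expected ids in the fixtures above follow Perceiver's byte-level scheme: each UTF-8
# byte is shifted past the special tokens (an offset of 6 for this tokenizer, as the
# fixtures show), so e.g. ord("U") + 6 == 91. A quick, self-contained check of that
# arithmetic:
assert [b + 6 for b in "Unicode".encode("utf-8")] == [91, 116, 111, 105, 117, 106, 107]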
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
        __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.int64 )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
    def lowercase_ ( self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
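# Usage sketch (hedged): these classes mirror the `Translation` and
# `TranslationVariableLanguages` feature types from `datasets`; declaring one in a
# `Features` spec looks like:
#
#   from datasets import Features
#   feats = Features({"translation": Translation(languages=["en", "fr"])})
#   # a matching example row: {"translation": {"en": "the cat", "fr": "le chat"}}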
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def UpperCAmelCase__( __UpperCAmelCase : int ):
print('Generating prime p...' )
__snake_case : Tuple = rabinMiller.generate_large_prime(__UpperCAmelCase )
print('Generating prime q...' )
__snake_case : int = rabinMiller.generate_large_prime(__UpperCAmelCase )
__snake_case : int = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__snake_case : Any = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__UpperCAmelCase , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__snake_case : Optional[int] = cryptoMath.find_mod_inverse(__UpperCAmelCase , (p - 1) * (q - 1) )
__snake_case : Optional[Any] = (n, e)
__snake_case : Tuple = (n, d)
return (public_key, private_key)
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : int ):
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__snake_case : Dict = generate_key(__UpperCAmelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
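# Round-trip sketch with a key pair shaped like the one written above. Only the
# textbook RSA relations are assumed here, not this module's (obfuscated) API:
# c = m^e mod n on encryption and m = c^d mod n on decryption, which holds
# because d was chosen as the modular inverse of e modulo (p - 1) * (q - 1).
def rsa_encrypt(message: int, public_key: tuple) -> int:
    n, e = public_key
    return pow(message, e, n)

def rsa_decrypt(ciphertext: int, private_key: tuple) -> int:
    n, d = private_key
    return pow(ciphertext, d, n)
# For any 0 <= m < n: rsa_decrypt(rsa_encrypt(m, public_key), private_key) == m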
| 708 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # f = g + h: cost so far plus heuristic estimate to the goal
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't expand any further
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find a solution' )
else: # choose the least costly cell so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
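# Post-hoc sanity check (a sketch; `path`, `init`, `goal` and `DIRECTIONS` are the
# conventional names behind the obfuscated assignments above). A valid result
# starts at init, ends at goal, and each step is exactly one move from DIRECTIONS:
# assert path[0] == init and path[-1] == goal
# for (xa, ya), (xb, yb) in zip(path, path[1:]):
#     assert [xb - xa, yb - ya] in DIRECTIONS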
| 679 | 0 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = WavaVecaPhonemeCTCTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : Union[str, Any] = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
__snake_case : str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__snake_case : List[str] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
__snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
__snake_case : Tuple = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )) for i in range(len(_UpperCAmelCase ) )]
__snake_case : int = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[str] = [t[0] for t in toks]
# Ensure consistency
__snake_case : List[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : Optional[Any] = ' ' + output_txt
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
__snake_case : Optional[int] = tokenizer('m xxx ɪ' , do_phonemize=_UpperCAmelCase ).input_ids
self.assertEqual(_UpperCAmelCase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
__snake_case : Tuple = tokenizer('m aaa ɪ ccc' , do_phonemize=_UpperCAmelCase ).input_ids
self.assertEqual(_UpperCAmelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__snake_case : Optional[int] = tokenizer('maɪ c' , do_phonemize=_UpperCAmelCase ).input_ids
self.assertEqual(_UpperCAmelCase , [3, 200] ) # mai should be <unk> (=3)
def lowercase_ ( self ):
__snake_case : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : Optional[int] = 'Hello how are you'
__snake_case : str = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
self.assertEqual(_UpperCAmelCase , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : Union[str, Any] = 'Hello how are you'
__snake_case : str = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(_UpperCAmelCase ).input_ids , tokenizer(_UpperCAmelCase , do_phonemize=_UpperCAmelCase ).input_ids )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : List[Any] = 'Hello how are you'
__snake_case : Dict = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
__snake_case : Optional[Any] = tokenizer.decode(tokenizer(_UpperCAmelCase ).input_ids )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__snake_case : Optional[int] = tokenizer.decode(sample_ids[0] )
__snake_case : List[Any] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , batch_tokens[0] )
self.assertEqual(_UpperCAmelCase , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def lowercase_ ( self ):
__snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : List[str] = 'Hello how are you'
__snake_case : str = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
self.assertEqual(_UpperCAmelCase , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def lowercase_ ( self ):
__snake_case : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : List[Any] = 'Hello how are you'
__snake_case : List[str] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(_UpperCAmelCase ).input_ids , tokenizer(_UpperCAmelCase , do_phonemize=_UpperCAmelCase ).input_ids )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
__snake_case : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__snake_case : Dict = tokenizer.decode(sample_ids[0] )
__snake_case : List[Any] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , batch_tokens[0] )
self.assertEqual(_UpperCAmelCase , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
__snake_case : Tuple = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_UpperCAmelCase )
__snake_case : Union[str, Any] = tokenizer.batch_decode(_UpperCAmelCase , filter_word_delimiter_token=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , batch_tokens[0] )
self.assertEqual(_UpperCAmelCase , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def lowercase_ ( self ):
__snake_case : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Any = 'Hello how are you'
__snake_case : Any = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
__snake_case : int = tokenizer.decode(tokenizer(_UpperCAmelCase ).input_ids , filter_word_delimiter_token=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Optional[int] = 'Hello how are you'
__snake_case : Any = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='en-us' )
__snake_case : List[Any] = tokenizer.decode(tokenizer(_UpperCAmelCase ).input_ids , filter_word_delimiter_token=_UpperCAmelCase )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=_UpperCAmelCase )
__snake_case : Optional[Any] = 'Hello how are you'
__snake_case : Optional[Any] = tokenizer(_UpperCAmelCase , phonemizer_lang='en-us' ).input_ids
__snake_case : Optional[Any] = tokenizer(_UpperCAmelCase , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase )
__snake_case : Tuple = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(_UpperCAmelCase , 'ɛ l o h aʊ a ʁ j u' )
def lowercase_ ( self ):
__snake_case : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : str = 'Hello how Are you'
__snake_case : Optional[Any] = 'hello how are you'
__snake_case : str = tokenizer(_UpperCAmelCase ).input_ids
__snake_case : Tuple = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
__snake_case : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__snake_case : Any = tokenizer.batch_decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self ):
__snake_case : List[Any] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__snake_case : str = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__snake_case : List[Any] = tokenizer.decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase , filter_word_delimiter_token=_UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def lowercase_ ( self ):
__snake_case : Any = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(isinstance(outputs_list[0] , _UpperCAmelCase ) )
# transform list to ModelOutput
__snake_case : int = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(_UpperCAmelCase , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
[recursive_check(_UpperCAmelCase , _UpperCAmelCase ) for la, la in zip(_UpperCAmelCase , _UpperCAmelCase )]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
__snake_case : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we check now is that
# the output type is correct and that the output is identical to `decode`.
# char
__snake_case : int = tokenizer.batch_decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase )
__snake_case : List[Any] = [tokenizer.decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase ) for ids in sample_ids]
check_list_tuples_equal(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def lowercase_ ( self ):
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def lowercase_ ( self ):
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def lowercase_ ( self ):
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Dict = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Any = tokenizer.vocab_size
__snake_case : Optional[Any] = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__snake_case : Union[str, Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
__snake_case : List[str] = tokenizer.add_tokens(_UpperCAmelCase )
__snake_case : int = tokenizer.vocab_size
__snake_case : Optional[int] = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) )
__snake_case : str = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__snake_case : Union[str, Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__snake_case : Tuple = tokenizer.add_special_tokens(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.vocab_size
__snake_case : Dict = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) )
__snake_case : Dict = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def lowercase_ ( self ):
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string, which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Tuple = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
__snake_case : Any = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(output['text'] , _UpperCAmelCase )
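# Interactive sketch mirroring the expectation tested above (requires the
# `phonemizer` backend; the class is shown under its real public name):
from transformers import Wav2Vec2PhonemeCTCTokenizer

tok = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
assert tok.phonemize("Hello how are you", phonemizer_lang="en-us") == "h ə l oʊ h aʊ ɑːɹ j uː"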
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. Initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
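# Composition sketch (names assumed to match the public transformers API behind
# the obfuscated classes above): build each sub-config, then combine them via the
# classmethod that takes vision, Q-Former and text configs.
# vision = InstructBlipVisionConfig()
# qformer = InstructBlipQFormerConfig()
# text = CONFIG_MAPPING["opt"]()  # default text backbone, per __init__ above
# config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)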
| 679 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not is_accelerate_available():
return method
__snake_case : Any = version.parse(accelerate.__version__ ).base_version
if version.parse(__UpperCAmelCase ) < version.parse('0.17.0' ):
return method
def wrapper(self : str , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Union[str, Any] ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *__UpperCAmelCase , **__UpperCAmelCase )
return wrapper
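# Usage sketch for the decorator above (in diffusers it is `apply_forward_hook`;
# the class and method names below are illustrative). It wraps an instance method
# so that an attached accelerate hook's `pre_forward` runs before the body, which
# moves offloaded weights onto the right device first:
# class TinyAutoencoder(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, sample):
#         ...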
| 710 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
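# Deprecation-pattern sketch (class shown under its real name; constructor kwargs
# omitted): instantiating the shim should emit a FutureWarning.
# import warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     BeitFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)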
| 679 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
__magic_name__ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
__magic_name__ = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
__magic_name__ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def lowercase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def lowercase_ ( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase="uniform_average" , _UpperCAmelCase=True ):
__snake_case : int = mean_squared_error(
_UpperCAmelCase , _UpperCAmelCase , sample_weight=_UpperCAmelCase , multioutput=_UpperCAmelCase , squared=_UpperCAmelCase )
return {"mse": mse}
 | 711 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
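# Self-contained check of the bit-string convention used by read_file_binary
# above: every byte becomes its zero-padded 8-bit representation.
data = b"AB"
bits = "".join(f"{byte:08b}" for byte in data)
assert bits == "0100000101000010"  # 0x41 = 'A', 0x42 = 'B'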
| 679 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : int = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
__snake_case : Tuple = DetaConfig(
backbone_config=__UpperCAmelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__UpperCAmelCase , with_box_refine=__UpperCAmelCase , two_stage=__UpperCAmelCase , )
# set labels
__snake_case : int = 'huggingface/label-files'
if "o365" in model_name:
__snake_case : List[str] = 3_66
__snake_case : str = 'object365-id2label.json'
else:
__snake_case : str = 91
__snake_case : Optional[int] = 'coco-detection-id2label.json'
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = json.load(open(cached_download(hf_hub_url(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) )
__snake_case : Union[str, Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case : List[str] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict ):
__snake_case : str = dct.pop(__UpperCAmelCase )
__snake_case : List[str] = val
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ):
__snake_case : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__snake_case : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__snake_case : List[Any] = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__snake_case : Any = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case : str = in_proj_weight[:dim, :]
__snake_case : Any = in_proj_bias[: dim]
__snake_case : List[str] = in_proj_weight[
dim : dim * 2, :
]
__snake_case : str = in_proj_bias[
dim : dim * 2
]
__snake_case : str = in_proj_weight[
-dim :, :
]
__snake_case : Any = in_proj_bias[-dim :]
# fmt: on
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
# transformer decoder self-attention layers
__snake_case : Dict = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__snake_case : Optional[Any] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__snake_case : List[Any] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Union[str, Any] = in_proj_weight[:hidden_size, :]
__snake_case : Any = in_proj_bias[:hidden_size]
__snake_case : Tuple = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__snake_case : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case : Any = in_proj_weight[-hidden_size:, :]
__snake_case : Tuple = in_proj_bias[-hidden_size:]
def UpperCAmelCase__( ):
__snake_case : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : str = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ):
__snake_case : Any = get_deta_config(__UpperCAmelCase )
# load original state dict
if model_name == "deta-swin-large":
__snake_case : Optional[Any] = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
__snake_case : Any = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
__snake_case : Tuple = torch.load(__UpperCAmelCase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__UpperCAmelCase , param.shape )
# rename keys
__snake_case : List[str] = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_swin_q_k_v(__UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCAmelCase , __UpperCAmelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__snake_case : List[str] = state_dict.pop(__UpperCAmelCase )
__snake_case : str = val
if "input_proj" in key:
__snake_case : Union[str, Any] = state_dict.pop(__UpperCAmelCase )
__snake_case : Dict = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__snake_case : int = state_dict.pop(__UpperCAmelCase )
__snake_case : Dict = val
# finally, create HuggingFace model and load state dict
__snake_case : Dict = DetaForObjectDetection(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
__snake_case : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__UpperCAmelCase )
# load image processor
__snake_case : int = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
__snake_case : int = prepare_img()
__snake_case : Any = processor(images=__UpperCAmelCase , return_tensors='pt' )
__snake_case : Dict = encoding['pixel_values']
__snake_case : Union[str, Any] = model(pixel_values.to(__UpperCAmelCase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__snake_case : List[Any] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__snake_case : Union[str, Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__snake_case : Dict = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__snake_case : int = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__UpperCAmelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__UpperCAmelCase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F"""jozhang97/{model_name}""" )
processor.push_to_hub(F"""jozhang97/{model_name}""" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__magic_name__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
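# Inference sketch with the converted checkpoint (repo id as pushed above; the
# `image` variable stands for the output of prepare_img()):
# from transformers import DetaForObjectDetection, DetaImageProcessor
# processor = DetaImageProcessor.from_pretrained("jozhang97/deta-swin-large")
# model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")
# inputs = processor(images=image, return_tensors="pt")
# outputs = model(**inputs)
# results = processor.post_process_object_detection(
#     outputs, threshold=0.5, target_sizes=[image.size[::-1]]
# )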
| 712 | from itertools import permutations
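# Project Euler problem 43: sum every 0-to-9 pandigital number whose three-digit
# substrings d2d3d4 through d8d9d10 are divisible by 2, 3, 5, 7, 11, 13 and 17
# respectively. The predicate below checks those seven divisibility conditions.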
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
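# Worked check against the example from the problem statement, 1406357289:
# 406 % 2 == 0, 063 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0,
# 572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0, so the predicate holds.
# (Shown under the conventional name `is_substring_divisible`; the def above is obfuscated.)
# assert is_substring_divisible(tuple(int(c) for c in "1406357289"))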
| 679 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__snake_case : Any = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = 'sshleifer/tiny-gpt2'
__snake_case : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Any = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = 'sgugger/tiny-distilbert-classification'
__snake_case : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
__snake_case : Optional[Any] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Dict = 'sshleifer/tiny-gpt2'
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Optional[Any] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Dict = 'sshleifer/tiny-gpt2'
__snake_case : Any = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Dict = TensorFlowBenchmark(_UpperCAmelCase , [config] )
__snake_case : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : int = 'sshleifer/tiny-gpt2'
__snake_case : int = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(_UpperCAmelCase , [config] )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Optional[Any] = 'sshleifer/tiny-gpt2'
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
__snake_case : str = 'sshleifer/tiny-gpt2'
__snake_case : Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(_UpperCAmelCase , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
__snake_case : Optional[int] = 'patrickvonplaten/t5-tiny-random'
__snake_case : int = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase , configs=[config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def lowercase_ ( self ):
__snake_case : int = 'sshleifer/tiny-gpt2'
__snake_case : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_UpperCAmelCase , 'env.csv' ) , multi_process=_UpperCAmelCase , )
__snake_case : str = TensorFlowBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'env.csv' ) ).exists() )
def lowercase_ ( self ):
__snake_case : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_UpperCAmelCase ):
self.assertTrue(hasattr(_UpperCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'current' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , 'log.txt' ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Optional[Any] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'log.txt' ) ).exists() )
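# Standalone usage sketch (added for illustration) of the benchmark API that
# the tests above exercise; the model name and sizes are arbitrary examples,
# kept as comments so this test module's behaviour is unchanged:
#
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#   args = TensorFlowBenchmarkArguments(
#       models=['sshleifer/tiny-gpt2'], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()
#   print(results.time_inference_result)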
| 713 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
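# Illustration (added): pretty_print(3) produces this diamond, line by line
# (each star is printed as '* ', so the real output carries trailing spaces):
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *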
| 679 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
__snake_case : int = r'\w+[.]\d+'
__snake_case : List[Any] = re.findall(__UpperCAmelCase , __UpperCAmelCase )
for pat in pats:
__snake_case : Any = key.replace(__UpperCAmelCase , '_'.join(pat.split('.' ) ) )
return key
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict ):
__snake_case : str = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__snake_case : int = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__snake_case : Dict = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__snake_case : Tuple = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__snake_case : str = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__snake_case : Any = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__snake_case : Dict = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
__snake_case : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__snake_case : int = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__snake_case : Any = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=42 ):
# Step 1: Convert pytorch tensor to numpy
__snake_case : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__snake_case : Any = flax_model.init_weights(PRNGKey(__UpperCAmelCase ) )
__snake_case : Dict = flatten_dict(__UpperCAmelCase )
__snake_case : Any = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__snake_case : str = rename_key(__UpperCAmelCase )
__snake_case : str = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
        __snake_case , __snake_case : Any = rename_key_and_reshape_tensor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
__snake_case : Optional[Any] = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
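# Quick illustration (added; standalone, does not touch the library code):
# rename_key rewrites every "<name>.<digit>" segment to "<name>_<digit>",
# which is how PyTorch ModuleList indices become Flax-style names, e.g.:
#
#   import re
#   key = 'down_blocks.0.attentions.1.proj.weight'
#   for pat in re.findall(r'\w+[.]\d+', key):
#       key = key.replace(pat, '_'.join(pat.split('.')))
#   # key == 'down_blocks_0.attentions_1.proj.weight'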
| 714 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
        __snake_case : Dict = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
        __snake_case : Dict = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
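# Worked example (added for illustration): Brian Kernighan's trick clears the
# lowest set bit on every `number &= number - 1` step, so 25 (0b11001) takes
# exactly three iterations:
#   25 & 24 = 0b11001 & 0b11000 = 24
#   24 & 23 = 0b11000 & 0b10111 = 16
#   16 & 15 = 0b10000 & 0b01111 = 0    -> 3 set bits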
| 679 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__UpperCAmelCase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def UpperCAmelCase__( __UpperCAmelCase : Any ):
__snake_case : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__snake_case : str = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
__snake_case : Union[str, Any] = PipelineDataFormat.from_str(
format=__UpperCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__UpperCAmelCase , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = nlp
__snake_case : Optional[Any] = reader
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
__snake_case : Optional[Any] = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=_UpperCAmelCase , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=_UpperCAmelCase , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=_UpperCAmelCase , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=_UpperCAmelCase , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=_UpperCAmelCase , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=_UpperCAmelCase , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=_UpperCAmelCase , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=_UpperCAmelCase , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=_UpperCAmelCase )
def lowercase_ ( self ):
        __snake_case , __snake_case : int = self._nlp, []
for entry in self._reader:
__snake_case : Union[str, Any] = nlp(**_UpperCAmelCase ) if self._reader.is_multi_columns else nlp(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
outputs.append(_UpperCAmelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__snake_case : Tuple = self._reader.save_binary(_UpperCAmelCase )
logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(_UpperCAmelCase )
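# Hypothetical invocation sketch (added for illustration; the flags follow the
# parser registered above, while the model and file names are made-up
# placeholders):
#
#   transformers-cli run --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english \
#       --input samples.csv --column sentence --format csv --output out.csv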
| 715 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
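# Usage sketch (added for illustration) of the helpers above inside a test
# module, assuming they are imported under their conventional names:
#
#   @require_faiss
#   @slow
#   def test_faiss_index(self):
#       ...  # runs only when faiss is installed and RUN_SLOW=1
#
#   def test_load_offline(self):
#       with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#           ...  # network calls now hit the mocked unroutable address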
| 679 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__magic_name__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__magic_name__ = [0, 25, 50]
__magic_name__ = [25, 50, 75]
__magic_name__ = fuzz.membership.trimf(X, abca)
    __magic_name__ = fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
__magic_name__ = np.ones(75)
__magic_name__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__magic_name__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__magic_name__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__magic_name__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__magic_name__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__magic_name__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__magic_name__ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__magic_name__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__magic_name__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
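# Minimal sketch (added for illustration) of the elementwise semantics used
# above: fuzzy OR takes the maximum and fuzzy AND the minimum of the two
# membership arrays, and the complement is 1 minus the membership:
#
#   a = np.array([0.2, 0.8]); b = np.array([0.5, 0.4])
#   np.maximum(a, b)  # union        -> [0.5, 0.8]
#   np.minimum(a, b)  # intersection -> [0.2, 0.4]
#   1 - a             # complement   -> [0.8, 0.2]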
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
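# Usage sketch (added for illustration; `Stack` stands for the linked-list
# stack class defined above, with its methods under their conventional names):
#
#   stack: Stack[int] = Stack()
#   stack.push(1)
#   stack.push(2)
#   assert str(stack) == "2->1"
#   assert stack.pop() == 2
#   assert stack.peek() == 1
#   assert not stack.is_empty()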
| 679 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : Tuple = inspect.getfile(accelerate.test_utils )
__snake_case : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
__snake_case : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowercase_ ( self ):
__snake_case : Tuple = F"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
__snake_case : List[Any] = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
__snake_case : List[str] = LxmertConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : Optional[int] = LxmertForPreTraining(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
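# Example invocation sketch (added for illustration; the script name and
# checkpoint paths are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_ckpt/model.ckpt \
#       --config_file ./lxmert_ckpt/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin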
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
__snake_case : List[Any] = parent
__snake_case : str = batch_size
__snake_case : str = image_size
__snake_case : str = num_channels
__snake_case : Dict = embeddings_size
__snake_case : Dict = hidden_sizes
__snake_case : Optional[Any] = depths
__snake_case : List[str] = is_training
__snake_case : Union[str, Any] = use_labels
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[int] = num_labels
__snake_case : Tuple = scope
__snake_case : Union[str, Any] = len(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Any = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = TFResNetModel(config=_UpperCAmelCase )
__snake_case : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = self.num_labels
__snake_case : List[str] = TFResNetForImageClassification(_UpperCAmelCase )
__snake_case : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self ):
__snake_case : Dict = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Any = TFResNetModelTester(self )
__snake_case : str = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
        __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(_UpperCAmelCase )
__snake_case : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : str = [*signature.parameters.keys()]
__snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = model_class(_UpperCAmelCase )
__snake_case : Tuple = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : int = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case : Tuple = layer_type
__snake_case : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[Any] = TFResNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self ):
__snake_case : Optional[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__snake_case : Optional[int] = self.default_image_processor
__snake_case : Any = prepare_img()
__snake_case : Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors='tf' )
# forward pass
__snake_case : List[Any] = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Tuple = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1E-4 ) )
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
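                # weighting each exit's loss by (ix + 1) makes the weighted average favor deeper layers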
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
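        # reduce the fraction with the iterative Euclidean algorithm (the divisor/remainder swaps below compute the gcd)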
        __snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
            __snake_case , __snake_case : Optional[int] = divisor, remainder
        __snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 720 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
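    # an isogram never repeats a letter, so the de-duplicated set must be as long as the string itself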
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : int = 10**12 ):
__snake_case : List[Any] = 1
__snake_case : str = 0
__snake_case : str = 1
__snake_case : int = 1
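    # Hedged note: this recurrence appears to walk successive solutions of the
    # Pell-like equation behind Project Euler problem 100 (b blue of n total
    # discs with P(blue, blue) = 1/2); each pass jumps straight to the next
    # valid pair, and (denominator + 1) // 2 recovers the blue-disc count.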
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
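# Illustrative use (hypothetical names; assumes the standard PretrainedConfig
# interface inherited above):
#
#     config = __SCREAMING_SNAKE_CASE(vocab_size=30_522, projection_dim=128)
#     config_dict = config.to_dict()   # serializable, like any config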
| 679 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] ):
__snake_case : Tuple = nn.functional.normalize(__UpperCAmelCase )
__snake_case : List[str] = nn.functional.normalize(__UpperCAmelCase )
return torch.mm(__UpperCAmelCase , normalized_text_embeds.t() )
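# Note: despite the name, this returns cosine *similarity* -- both inputs are
# L2-normalized first, so each entry of the matrix product lies in [-1, 1] and
# larger values mean the image and concept embeddings are more alike.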
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = CLIPConfig
__UpperCAmelCase = ["CLIPEncoderLayer"]
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : int = CLIPVisionModel(config.vision_config )
__snake_case : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
__snake_case : int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
__snake_case : Union[str, Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
__snake_case : int = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
__snake_case : Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
__snake_case : int = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Tuple = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
__snake_case : Any = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
__snake_case : str = []
__snake_case : Dict = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
__snake_case : str = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
__snake_case : Dict = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__snake_case : Optional[int] = special_cos_dist[i][concept_idx]
__snake_case : Tuple = self.special_care_embeds_weights[concept_idx].item()
__snake_case : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
__snake_case : List[str] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
__snake_case : Dict = cos_dist[i][concept_idx]
__snake_case : Optional[int] = self.concept_embeds_weights[concept_idx].item()
__snake_case : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
__snake_case : Dict = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.vision_model(_UpperCAmelCase )[1] # pooled_output
__snake_case : int = self.visual_projection(_UpperCAmelCase )
__snake_case : Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
__snake_case : Union[str, Any] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__snake_case : Tuple = 0.0
__snake_case : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__snake_case : int = torch.any(special_scores > 0 , dim=1 )
__snake_case : Optional[Any] = special_care * 0.01
__snake_case : List[Any] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__snake_case : Dict = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__snake_case : Dict = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
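# Standalone sketch of the thresholding used above, with made-up cosine scores
# and per-concept thresholds (adjustment = 0.0, as in the strict second path):
import torch  # already imported above; repeated so the sketch stands alone
demo_cos = torch.tensor([[0.21, 0.05], [0.02, 0.30]])  # (num_images, num_concepts)
demo_thresholds = torch.tensor([0.20, 0.25])           # per-concept cutoffs
demo_flagged = torch.any(demo_cos - demo_thresholds + 0.0 > 0, dim=1)
print(demo_flagged)  # tensor([True, True]) -- each image trips one concept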
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
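# Hedged note on the pattern above: `_LazyModule` replaces this module in
# `sys.modules`, so the heavy submodules are only imported on first attribute
# access, e.g. (illustrative path):
#
#     from transformers.models import biogpt   # cheap -- nothing heavy yet
#     biogpt.BioGptModel                       # now modeling_biogpt is imported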
| 679 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__magic_name__ = {'''facebook/blenderbot_small-90M''': 512}
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Optional[Any] = set()
__snake_case : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case : List[Any] = char
__snake_case : Dict = set(__UpperCAmelCase )
return pairs
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="__start__" , _UpperCAmelCase="__end__" , _UpperCAmelCase="__unk__" , _UpperCAmelCase="__null__" , **_UpperCAmelCase , ):
super().__init__(unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , **_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__snake_case : List[Any] = json.load(_UpperCAmelCase )
__snake_case : Tuple = {v: k for k, v in self.encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle:
__snake_case : List[Any] = merges_handle.read().split('\n' )[1:-1]
__snake_case : List[Any] = [tuple(merge.split() ) for merge in merges]
__snake_case : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__snake_case : int = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
def lowercase_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self , _UpperCAmelCase ):
if token in self.cache:
return self.cache[token]
__snake_case : Tuple = re.sub('([.,!?()])' , R' \1' , _UpperCAmelCase )
__snake_case : List[str] = re.sub('(\')' , R' \1 ' , _UpperCAmelCase )
__snake_case : Optional[Any] = re.sub(R'\s{2,}' , ' ' , _UpperCAmelCase )
if "\n" in token:
__snake_case : int = token.replace('\n' , ' __newln__' )
__snake_case : Optional[int] = token.split(' ' )
__snake_case : Tuple = []
for token in tokens:
if not len(_UpperCAmelCase ):
continue
__snake_case : List[str] = token.lower()
__snake_case : Optional[Any] = tuple(_UpperCAmelCase )
__snake_case : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__snake_case : Optional[Any] = get_pairs(_UpperCAmelCase )
if not pairs:
words.append(_UpperCAmelCase )
continue
while True:
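                # greedily merge the learned pair with the lowest rank first (earliest line in merges.txt)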
__snake_case : Union[str, Any] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case : List[Any] = bigram
__snake_case : Optional[int] = []
__snake_case : Optional[int] = 0
while i < len(_UpperCAmelCase ):
try:
__snake_case : List[Any] = word.index(_UpperCAmelCase , _UpperCAmelCase )
new_word.extend(word[i:j] )
__snake_case : Optional[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case : int = tuple(_UpperCAmelCase )
__snake_case : Dict = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
__snake_case : Dict = get_pairs(_UpperCAmelCase )
__snake_case : Union[str, Any] = '@@ '.join(_UpperCAmelCase )
__snake_case : Optional[int] = word[:-4]
__snake_case : Union[str, Any] = word
words.append(_UpperCAmelCase )
return " ".join(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = []
__snake_case : int = re.findall(R'\S+\n?' , _UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(' ' ) ) )
return split_tokens
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = token.lower()
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = ' '.join(_UpperCAmelCase ).replace('@@ ' , '' ).strip()
return out_string
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__snake_case : str = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' )
__snake_case : str = 0
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : _UpperCAmelCase[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__snake_case : Union[str, Any] = token_index
writer.write(' '.join(_UpperCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
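# Standalone sketch of one greedy merge step from `bpe` above (toy ranks; the
# real ranks come from merges.txt, and a lower rank means merged earlier):
toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
toy_word = ["l", "o", "w", "</w>"]
toy_pairs = set(zip(toy_word, toy_word[1:]))
print(min(toy_pairs, key=lambda pair: toy_ranks.get(pair, float("inf"))))  # ('l', 'o')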
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
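# Quick standalone check of the shape arithmetic asserted in the hidden-states
# test above (assumed tester defaults: image_size = 32, five reported stages,
# resolution halved at every stage, output_stride = 32):
image_size, num_stages = 32, 5
print([image_size // (2 ** (stage + 1)) for stage in range(num_stages)])  # [16, 8, 4, 2, 1]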
| 679 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ):
try:
with open(__UpperCAmelCase , 'rb' ) as flax_state_f:
__snake_case : int = from_bytes(__UpperCAmelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__UpperCAmelCase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    __snake_case : Tuple = flatten_dict(jax.tree_util.tree_map(lambda __UpperCAmelCase : __UpperCAmelCase.dtype == jnp.bfloataa , __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        __snake_case : Union[str, Any] = jax.tree_util.tree_map(
            lambda __UpperCAmelCase : __UpperCAmelCase.astype(np.floataa ) if __UpperCAmelCase.dtype == jnp.bfloataa else __UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[int] = ''
__snake_case : str = flatten_dict(__UpperCAmelCase , sep='.' )
__snake_case : Any = pt_model.state_dict()
# keep track of unexpected & missing keys
__snake_case : Tuple = []
__snake_case : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__snake_case : List[str] = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__snake_case : int = flax_key_tuple_array[:-1] + ['weight']
__snake_case : Union[str, Any] = jnp.transpose(__UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__snake_case : str = flax_key_tuple_array[:-1] + ['weight']
__snake_case : List[Any] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__snake_case : Dict = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__UpperCAmelCase ):
__snake_case : str = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
__snake_case : Optional[Any] = '.'.join(__UpperCAmelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
__snake_case : List[Any] = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase , np.ndarray ) else flax_tensor
__snake_case : Optional[Any] = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
__snake_case : Tuple = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__UpperCAmelCase ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
return pt_model
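# Standalone sketch of the two layout conversions above (illustrative shapes):
# Flax stores dense kernels as (in, out) and conv kernels as (H, W, in, out),
# while PyTorch expects (out, in) and (out, in, H, W).
import numpy as np  # repeated import so the sketch stands alone
demo_dense = np.zeros((768, 3_072))                   # flax (in, out)
demo_conv = np.zeros((3, 3, 16, 32))                  # flax (H, W, in, out)
print(demo_dense.T.shape)                             # (3072, 768)
print(np.transpose(demo_conv, (3, 2, 0, 1)).shape)    # (32, 16, 3, 3)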
| 702 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
        __snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
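# Standalone sketch of the flattening done by `lowercase_` above (hypothetical
# example; a value may be a single string or a list of strings):
demo_example = {"fr": ["le chat"], "en": "the cat"}
demo_tuples = []
for demo_lang, demo_text in demo_example.items():
    if isinstance(demo_text, list):
        demo_tuples.extend((demo_lang, t) for t in demo_text)
    else:
        demo_tuples.append((demo_lang, demo_text))
print(dict(zip(("language", "translation"), zip(*sorted(demo_tuples)))))
# {'language': ('en', 'fr'), 'translation': ('the cat', 'le chat')}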
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
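# Shape walkthrough for the collator above (illustrative numbers): a batch of
# 8 examples x 4 choices is flattened to 32 sequences, `tokenizer.pad` yields
# input_ids of shape (32, seq_len), and `.view(8, 4, -1)` restores one row of
# 4 candidate endings per example before the labels are re-attached.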
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
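        # every example expands into 4 (repeated context, question-start + candidate ending) pairs, one per choice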
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
__magic_name__ = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
__magic_name__ = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
__magic_name__ = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def UpperCAmelCase__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , ):
if label_map is not None:
for old_id, new_id in label_map.items():
__snake_case : Tuple = new_id
# turn into Numpy arrays
__snake_case : Optional[int] = np.array(__UpperCAmelCase )
__snake_case : Optional[int] = np.array(__UpperCAmelCase )
    if reduce_labels:
        # per the docstring above: remap background (0) to 255, shift every
        # remaining label down by one, and re-mark the shifted background as 255
        __snake_case : Tuple = 2_55
        __snake_case : List[str] = label - 1
        __snake_case : List[str] = 2_55
__snake_case : Optional[Any] = label != ignore_index
__snake_case : str = np.not_equal(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[Any] = pred_label[mask]
__snake_case : Union[str, Any] = np.array(__UpperCAmelCase )[mask]
__snake_case : Any = pred_label[pred_label == label]
__snake_case : int = np.histogram(__UpperCAmelCase , bins=__UpperCAmelCase , range=(0, num_labels - 1) )[0]
__snake_case : List[str] = np.histogram(__UpperCAmelCase , bins=__UpperCAmelCase , range=(0, num_labels - 1) )[0]
__snake_case : Union[str, Any] = np.histogram(__UpperCAmelCase , bins=__UpperCAmelCase , range=(0, num_labels - 1) )[0]
__snake_case : Any = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
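# Hedged sketch of the histogram-based area counting above: with bins=num_labels
# and range=(0, num_labels - 1), np.histogram returns one pixel count per class:
#
#   import numpy as np
#   np.histogram(np.array([0, 0, 1, 2, 2, 2]), bins=3, range=(0, 2))[0]
#   # -> array([2, 1, 3])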
def UpperCAmelCase__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , ):
__snake_case : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
__snake_case : Any = np.zeros((num_labels,) , dtype=np.floataa )
__snake_case : str = np.zeros((num_labels,) , dtype=np.floataa )
__snake_case : Any = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(__UpperCAmelCase , __UpperCAmelCase ):
__snake_case : Dict = intersect_and_union(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def UpperCAmelCase__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , ):
__snake_case : int = total_intersect_and_union(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# compute metrics
__snake_case : Optional[int] = {}
__snake_case : str = total_area_intersect.sum() / total_area_label.sum()
__snake_case : List[Any] = total_area_intersect / total_area_union
__snake_case : List[Any] = total_area_intersect / total_area_label
__snake_case : Union[str, Any] = np.nanmean(__UpperCAmelCase )
__snake_case : Optional[Any] = np.nanmean(__UpperCAmelCase )
__snake_case : Dict = all_acc
__snake_case : List[Any] = iou
__snake_case : int = acc
if nan_to_num is not None:
__snake_case : Tuple = {metric: np.nan_to_num(__UpperCAmelCase , nan=__UpperCAmelCase ) for metric, metric_value in metrics.items()}
return metrics
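# Note (follows from the NumPy operations above): a class absent from both the
# prediction and the ground truth yields 0 / 0 = NaN in per-category IoU;
# np.nanmean skips such entries, and `nan_to_num`, when set, replaces them, e.g.
#   np.nan_to_num(np.array([0.5, np.nan]), nan=-1.0)  # -> array([ 0.5, -1. ])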
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def lowercase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
__snake_case : Union[str, Any] = mean_iou(
results=_UpperCAmelCase , gt_seg_maps=_UpperCAmelCase , num_labels=_UpperCAmelCase , ignore_index=_UpperCAmelCase , nan_to_num=_UpperCAmelCase , label_map=_UpperCAmelCase , reduce_labels=_UpperCAmelCase , )
return iou_result
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
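    # Illustrative behaviour (assumption): with lang_codes="mustc" and
    # tgt_lang="fr", `prefix_tokens` becomes [lang_code_to_id["fr"]], so every
    # sequence built below starts with the <lang:fr> token id.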
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
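    # Mask layout sketch (assumption): with one language prefix token and a
    # three-token single sequence, the returned mask is [1, 0, 0, 0, 1]
    # (prefix token, regular tokens, trailing EOS).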
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , ):
if attention_mask is None:
__snake_case : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__snake_case : Tuple = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__snake_case : Optional[Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
__snake_case : List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
__snake_case : int = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
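# Shape sketch (assumption): for input_ids of shape (batch_size, seq_length)
# this helper yields attention masks of shape (batch_size, seq_length) and
# head masks of shape (num_layers, num_attention_heads), all ones by default.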
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="relu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ):
__snake_case : Union[str, Any] = parent
__snake_case : str = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : str = use_labels
__snake_case : Tuple = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : int = hidden_act
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : Dict = encoder_layerdrop
__snake_case : int = decoder_layerdrop
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = eos_token_id
__snake_case : Tuple = pad_token_id
__snake_case : Optional[int] = bos_token_id
def lowercase_ ( self ):
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[Any] = self.eos_token_id # Eos Token
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in the past input
__snake_case : str = input_ids.clamp(self.pad_token_id + 1 )
__snake_case : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
__snake_case : int = self.get_config()
__snake_case : Dict = prepare_mam_aaa_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowercase_ ( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowercase_ ( self ):
__snake_case : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = MaMaaaModel(config=_UpperCAmelCase ).get_decoder().to(_UpperCAmelCase ).eval()
__snake_case : str = inputs_dict['input_ids']
__snake_case : Union[str, Any] = inputs_dict['attention_mask']
__snake_case : Any = inputs_dict['head_mask']
# first forward pass
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__snake_case : List[Any] = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : List[str] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and next attention mask
__snake_case : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__snake_case : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )['last_hidden_state']
__snake_case : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[
'last_hidden_state'
]
# select random slice
__snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = MaMaaaModel(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__snake_case : str = model(**_UpperCAmelCase )
__snake_case : Dict = outputs.encoder_last_hidden_state
__snake_case : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = model.get_encoder()
encoder.save_pretrained(_UpperCAmelCase )
__snake_case : int = MaMaaaEncoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Any = model.get_decoder()
decoder.save_pretrained(_UpperCAmelCase )
__snake_case : Tuple = MaMaaaDecoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
__snake_case : List[Any] = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__UpperCAmelCase = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowercase_ ( self ):
__snake_case : Union[str, Any] = MaMaaaModelTester(self )
__snake_case : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = model_class.from_pretrained(_UpperCAmelCase , output_loading_info=_UpperCAmelCase )
self.assertEqual(info['missing_keys'] , [] )
def lowercase_ ( self ):
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
__snake_case : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Optional[int] = copy.deepcopy(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
if not self.is_encoder_decoder:
__snake_case : Optional[int] = inputs['input_ids']
del inputs["input_ids"]
else:
__snake_case : int = inputs['input_ids']
__snake_case : List[str] = inputs.get('decoder_input_ids' , _UpperCAmelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , _UpperCAmelCase )
__snake_case : Optional[int] = model.get_input_embeddings()
if not self.is_encoder_decoder:
__snake_case : Tuple = wte(_UpperCAmelCase )
else:
__snake_case : List[Any] = wte(_UpperCAmelCase )
__snake_case : Optional[Any] = wte(_UpperCAmelCase )
with torch.no_grad():
model(**_UpperCAmelCase )[0]
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
__snake_case : Any = input_dict['input_ids']
__snake_case : List[Any] = input_ids.ne(1 ).to(_UpperCAmelCase )
__snake_case : Optional[int] = MaMaaaForConditionalGeneration(_UpperCAmelCase ).eval().to(_UpperCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
model.generate(num_beams=4 , do_sample=_UpperCAmelCase , early_stopping=_UpperCAmelCase , num_return_sequences=3 )
def UpperCAmelCase__( __UpperCAmelCase : str ):
return torch.tensor(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase )
__magic_name__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def lowercase_ ( self ):
__snake_case : int = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase )
__snake_case : Optional[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__snake_case : Union[str, Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__snake_case : int = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )[0]
__snake_case : Tuple = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
__snake_case : Optional[int] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase_ ( self ):
__snake_case : List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase )
# change to intended input
__snake_case : Union[str, Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__snake_case : str = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__snake_case : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
with torch.no_grad():
__snake_case : List[str] = model(**_UpperCAmelCase )[0]
__snake_case : Optional[int] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
__snake_case : Optional[Any] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase_ ( self ):
__snake_case : Any = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
__snake_case : Optional[Any] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
        # The sentences below test that we don't add any hypotheses outside of the top n_beams
__snake_case : int = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
__snake_case : str = model.generate(
input_ids=dct['input_ids'].to(_UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(_UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
__snake_case : Tuple = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
__snake_case : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
assert generated == expected_en
| 705 | def UpperCAmelCase__( __UpperCAmelCase : list ):
    # Odd-even transposition sort: alternate "even" passes over pairs starting
    # at index 0 and "odd" passes over pairs starting at index 1; after as many
    # passes as there are elements, the list is sorted in place.
    __snake_case : List[Any] = len(__UpperCAmelCase )
    for _ in range(__UpperCAmelCase ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                __snake_case , __snake_case : int = arr[i + 1], arr[i]
    return arr
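# Worked trace (illustrative): odd_even_transposition([3, 1, 2])
#   pass 0 (even): compares indices (0, 1) -> [1, 3, 2]
#   pass 1 (odd):  compares indices (1, 2) -> [1, 2, 3]
#   pass 2 (even): no swap needed          -> [1, 2, 3]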
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__magic_name__ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ):
inspect_dataset(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[Any] = path + '.py'
assert script_name in os.listdir(__UpperCAmelCase )
assert "__pycache__" not in os.listdir(__UpperCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] ):
inspect_metric(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[Any] = path + '.py'
assert script_name in os.listdir(__UpperCAmelCase )
assert "__pycache__" not in os.listdir(__UpperCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
__snake_case : Dict = get_dataset_config_info(__UpperCAmelCase , config_name=__UpperCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Any ):
with pytest.raises(__UpperCAmelCase ):
get_dataset_config_info(__UpperCAmelCase , config_name=__UpperCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
__snake_case : Any = get_dataset_config_names(__UpperCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int ):
__snake_case : str = get_dataset_infos(__UpperCAmelCase )
assert list(infos.keys() ) == expected_configs
__snake_case : Tuple = expected_configs[0]
assert expected_config in infos
__snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int ):
__snake_case : List[Any] = get_dataset_infos(__UpperCAmelCase )
assert expected_config in infos
__snake_case : Optional[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
with pytest.raises(__UpperCAmelCase ):
get_dataset_split_names(__UpperCAmelCase , config_name=__UpperCAmelCase )
| 706 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
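    # Byte-to-id sketch (assumption, consistent with the expected ids below):
    # Perceiver tokenizes raw UTF-8 bytes with a fixed offset for its special
    # tokens, e.g. ord("A") = 65 -> id 71 and ord(".") = 46 -> id 52.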
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 707 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
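    # Usage sketch (assumption): Translation(languages=["en", "fr"]) stores
    # examples like {"en": "the cat", "fr": "le chat"} and flattens to
    # {"en": Value("string"), "fr": Value("string")}.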
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
__snake_case : Optional[int] = parent
__snake_case : int = batch_size
__snake_case : str = seq_length
__snake_case : List[str] = is_training
__snake_case : int = use_input_mask
__snake_case : Optional[int] = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : List[Any] = vocab_size
__snake_case : Any = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Optional[Any] = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : str = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : List[str] = max_position_embeddings
__snake_case : Optional[Any] = type_vocab_size
__snake_case : Any = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[Any] = num_labels
__snake_case : Union[str, Any] = num_choices
__snake_case : Tuple = scope
def lowercase_ ( self ):
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : List[str] = None
if self.use_input_mask:
__snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[Any] = None
__snake_case : Dict = None
__snake_case : str = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Any = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=_UpperCAmelCase , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = OpenLlamaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__snake_case : Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__snake_case : Union[str, Any] = True
__snake_case : List[Any] = OpenLlamaModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
__snake_case : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__snake_case : Optional[Any] = OpenLlamaForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__snake_case : Any = True
__snake_case : int = True
__snake_case : Optional[int] = OpenLlamaForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
__snake_case : int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase , )
__snake_case : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and next attention mask
__snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
__snake_case : Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
# select random slice
__snake_case : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
(
__snake_case
) : List[str] = config_and_inputs
__snake_case : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Any = OpenLlamaModelTester(self )
__snake_case : List[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : List[Any] = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = 3
__snake_case : Optional[int] = input_dict['input_ids']
__snake_case : Optional[Any] = input_ids.ne(1 ).to(_UpperCAmelCase )
__snake_case : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : List[str] = OpenLlamaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = 3
__snake_case : Optional[Any] = 'single_label_classification'
__snake_case : List[str] = input_dict['input_ids']
__snake_case : Optional[int] = input_ids.ne(1 ).to(_UpperCAmelCase )
__snake_case : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : Any = OpenLlamaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = 3
__snake_case : List[Any] = 'multi_label_classification'
__snake_case : str = input_dict['input_ids']
__snake_case : str = input_ids.ne(1 ).to(_UpperCAmelCase )
__snake_case : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case : List[Any] = OpenLlamaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def lowercase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = ids_tensor([1, 10] , config.vocab_size )
__snake_case : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : List[Any] = OpenLlamaModel(_UpperCAmelCase )
original_model.to(_UpperCAmelCase )
original_model.eval()
__snake_case : Dict = original_model(_UpperCAmelCase ).last_hidden_state
__snake_case : str = original_model(_UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : List[Any] = {'type': scaling_type, 'factor': 10.0}
__snake_case : List[Any] = OpenLlamaModel(_UpperCAmelCase )
scaled_model.to(_UpperCAmelCase )
scaled_model.eval()
__snake_case : Any = scaled_model(_UpperCAmelCase ).last_hidden_state
__snake_case : Tuple = scaled_model(_UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
| 708 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # estimated total cost: path cost so far plus heuristic to the goal
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't expand the search any further
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
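# Illustrative sketch (not part of the original file): the sort/reverse/pop
# loop above is a hand-rolled priority queue ordered by f = g + heuristic.
# The same selection with heapq, assuming a 4-connected grid with unit step
# cost and the Manhattan heuristic used above:
import heapq

def a_star_shortest_cost(grid, start, goal):
    rows, cols = len(grid), len(grid[0])
    start_h = abs(start[0] - goal[0]) + abs(start[1] - goal[1])
    open_heap = [(start_h, 0, start)]  # entries are (f, g, cell)
    seen = set()
    while open_heap:
        f, g, (x, y) = heapq.heappop(open_heap)  # cheapest f first
        if (x, y) == goal:
            return g
        if (x, y) in seen:
            continue
        seen.add((x, y))
        for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 0:
                h = abs(nx - goal[0]) + abs(ny - goal[1])  # Manhattan heuristic
                heapq.heappush(open_heap, (g + 1 + h, g + 1, (nx, ny)))
    return -1  # goal unreachable

# On the 5x6 demo grid defined in the __main__ block above, the shortest path
# from (0, 0) to (4, 5) costs 13 steps.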
| 679 | 0 |
'''simple docstring'''
from random import randint, random
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 5 , ):
__snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any cars
__snake_case : Union[str, Any] = 0
__snake_case : Optional[int] = max(__UpperCAmelCase , 0 )
while i < number_of_cells:
__snake_case : Optional[Any] = (
randint(0 , __UpperCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCAmelCase__( __UpperCAmelCase : list , __UpperCAmelCase : int ):
__snake_case : Optional[int] = 0
__snake_case : Dict = highway_now[car_index + 1 :]
for cell in range(len(__UpperCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# The car is near the end of the highway: wrap around and keep counting from the start
return distance + get_distance(__UpperCAmelCase , -1 )
def UpperCAmelCase__( __UpperCAmelCase : list , __UpperCAmelCase : float , __UpperCAmelCase : int ):
__snake_case : Optional[int] = len(__UpperCAmelCase )
# Before the calculations, the highway is empty
__snake_case : Optional[Any] = [-1] * number_of_cells
for car_index in range(__UpperCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__snake_case : int = min(highway_now[car_index] + 1 , __UpperCAmelCase )
# Number of empty cells before the next car
__snake_case : Union[str, Any] = get_distance(__UpperCAmelCase , __UpperCAmelCase ) - 1
# We can't have the car causing an accident
__snake_case : int = min(next_highway[car_index] , __UpperCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
__snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def UpperCAmelCase__( __UpperCAmelCase : list , __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : int ):
__snake_case : Any = len(highway[0] )
for i in range(__UpperCAmelCase ):
__snake_case : Optional[Any] = update(highway[i] , __UpperCAmelCase , __UpperCAmelCase )
__snake_case : Dict = [-1] * number_of_cells
for car_index in range(__UpperCAmelCase ):
__snake_case : str = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__snake_case : int = (car_index + speed) % number_of_cells
# Commit the change of position
__snake_case : Tuple = speed
highway.append(__UpperCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
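# Illustrative sketch (not part of the original file): the update() logic above
# applies the three Nagel-Schreckenberg rules to each car; here they are with
# descriptive names. `gap` is the number of empty cells in front of the car.
def next_speed(speed: int, gap: int, max_speed: int, probability: float, rand: float) -> int:
    speed = min(speed + 1, max_speed)  # 1. accelerate towards the speed limit
    speed = min(speed, gap)            # 2. brake so we never reach the next car
    if rand < probability:             # 3. randomly dawdle
        speed = max(speed - 1, 0)
    return speed

# e.g. next_speed(3, 2, max_speed=5, probability=0.1, rand=0.5) returns 2:
# the car accelerates to 4 but brakes down to the 2-cell gap.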
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
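# Illustrative usage sketch (not part of the original file). The readable class
# names follow the released transformers API (the classes above are renamed by
# the obfuscation), and OPTConfig is an assumption: "opt" is the fallback text
# model type when the text config carries no model_type.
from transformers import OPTConfig

vision = InstructBlipVisionConfig()  # defaults as defined above
qformer = InstructBlipQFormerConfig(encoder_hidden_size=vision.hidden_size)
config = InstructBlipConfig(
    vision_config=vision.to_dict(),
    qformer_config=qformer.to_dict(),
    text_config=OPTConfig().to_dict(),
    num_query_tokens=32,
)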
| 679 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
__snake_case : Tuple = parent
__snake_case : int = batch_size
__snake_case : Union[str, Any] = seq_length
__snake_case : Any = is_training
__snake_case : Tuple = use_input_mask
__snake_case : List[Any] = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : int = vocab_size
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : int = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[Any] = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = num_labels
__snake_case : Tuple = num_choices
__snake_case : Optional[int] = scope
__snake_case : Tuple = vocab_size - 1
def lowercase_ ( self ):
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = None
if self.use_input_mask:
__snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : List[str] = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[str] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case : List[str] = True
return config, input_ids, input_mask, token_labels
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = GPTNeoXModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__snake_case : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = True
__snake_case : int = GPTNeoXModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Dict = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = self.num_labels
__snake_case : Tuple = GPTNeoXForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = self.num_labels
__snake_case : Optional[int] = GPTNeoXForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = self.num_labels
__snake_case : Optional[int] = GPTNeoXForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[int] = True
__snake_case : List[Any] = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__snake_case : int = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__snake_case : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__snake_case : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
__snake_case : Optional[int] = output_from_no_past['hidden_states'][0]
__snake_case : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
# select random slice
__snake_case : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case : str = config_and_inputs
__snake_case : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Tuple = GPTNeoXModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=64 , num_attention_heads=8 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
# This regression test was failing with PyTorch < 1.3
__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def lowercase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = ids_tensor([1, 10] , config.vocab_size )
__snake_case : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Tuple = GPTNeoXModel(_UpperCAmelCase )
original_model.to(_UpperCAmelCase )
original_model.eval()
__snake_case : Optional[Any] = original_model(_UpperCAmelCase ).last_hidden_state
__snake_case : List[str] = original_model(_UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Any = {'type': scaling_type, 'factor': 10.0}
__snake_case : Optional[Any] = GPTNeoXModel(_UpperCAmelCase )
scaled_model.to(_UpperCAmelCase )
scaled_model.eval()
__snake_case : Tuple = scaled_model(_UpperCAmelCase ).last_hidden_state
__snake_case : Dict = scaled_model(_UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase_ ( self ):
__snake_case : Any = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
__snake_case : Union[str, Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_UpperCAmelCase )
__snake_case : Tuple = tokenizer('My favorite food is' , return_tensors='pt' ).to(_UpperCAmelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__snake_case : Tuple = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
__snake_case : Optional[Any] = model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=20 )
__snake_case : str = tokenizer.batch_decode(_UpperCAmelCase )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
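# Illustrative sketch (not part of the original tests) of what the
# {'type': scaling_type, 'factor': 10.0} dict above configures; the attribute
# name rope_scaling follows the released GPTNeoXConfig API.
config = GPTNeoXConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=8,
    intermediate_size=128, max_position_embeddings=512,
)
config.rope_scaling = {"type": "linear", "factor": 10.0}
model = GPTNeoXModel(config)
# Linear scaling embeds position t as t / factor, stretching the usable context
# to roughly factor * max_position_embeddings positions; "dynamic" rescales
# only once an input exceeds the original maximum, which is why the
# short-input hidden states match in the parameterized test above.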
| 710 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
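# Behaviour note (illustrative, not in the original file): the shim warns on
# construction and otherwise behaves exactly like BeitImageProcessor. The
# warning category argument above is obfuscated; in the released code it is
# FutureWarning (assumption):
# >>> BeitFeatureExtractor()  # emits the deprecation warning, then works as usual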
| 679 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
__magic_name__ = {'''mobilebert-uncased''': 512}
__magic_name__ = {}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = MobileBertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
__snake_case : Union[str, Any] = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
__snake_case : Tuple = do_lower_case
__snake_case : int = strip_accents
__snake_case : Optional[Any] = tokenize_chinese_chars
__snake_case : List[str] = normalizer_class(**_UpperCAmelCase )
__snake_case : Union[str, Any] = do_lower_case
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Dict = [self.sep_token_id]
__snake_case : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
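# Worked example (illustrative, not in the original file) for the two methods
# above, given a sentence pair A, B:
#   tokens:          [CLS] a1 ... an [SEP] b1 ... bm [SEP]
#   token_type_ids:    0   0  ...  0   0    1  ...  1   1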
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : List[Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 711 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.log2(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
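# Note (illustrative, not in the original file): the math.log2(...).is_integer()
# check above detects when the number of dictionary entries crosses a power of
# two; at that point every code is widened with a leading '0' bit so the
# decoder's code width stays in sync with the encoder's, e.g. codes '0'/'1'
# become '00'/'01' once a third entry ('10') is needed.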
| 679 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase ):
with open(_UpperCAmelCase , encoding='utf-8' ) as input_file:
__snake_case : Optional[int] = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
__snake_case : int = input_file.read()
__snake_case : List[Any] = regexp.search(_UpperCAmelCase )
return match
def lowercase_ ( self , _UpperCAmelCase ):
with open(_UpperCAmelCase , encoding='utf-8' ) as input_file:
__snake_case : Any = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
__snake_case : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__snake_case : str = regexp.finditer(_UpperCAmelCase )
__snake_case : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def lowercase_ ( self ):
__snake_case : Dict = Path('./datasets' )
__snake_case : List[str] = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCAmelCase ) ):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" )
def lowercase_ ( self ):
__snake_case : Optional[int] = Path('./datasets' )
__snake_case : Optional[Any] = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCAmelCase ) ):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 712 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
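# Worked check (illustrative, not in the original file): 1406357289 is one of
# the pandigitals counted by the solution above:
#   d2d3d4 = 406 % 2 == 0    d5d6d7 = 357 % 7 == 0     d7d8d9  = 728 % 13 == 0
#   d3d4d5 =  63 % 3 == 0    d6d7d8 = 572 % 11 == 0    d8d9d10 = 289 % 17 == 0
#   d4d5d6 = 635 % 5 == 0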
| 679 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCAmelCase__( __UpperCAmelCase : Dict ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCAmelCase__( ):
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case : Optional[Any] = [1, 2, 3]
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=2 )
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
__snake_case : Optional[int] = [1, 2]
__snake_case : List[Any] = {'a': 1, 'b': 2}
__snake_case : Optional[int] = {'a': [1, 2], 'b': [3, 4]}
__snake_case : List[str] = {'a': {'1': 1}, 'b': 2}
__snake_case : str = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
__snake_case : List[Any] = [2, 3]
__snake_case : Optional[Any] = {'a': 2, 'b': 3}
__snake_case : str = {'a': [2, 3], 'b': [4, 5]}
__snake_case : Any = {'a': {'1': 2}, 'b': 3}
__snake_case : List[str] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
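# Illustrative sketch (not part of the original tests): outside the test suite
# the same context manager routes map_nested through the named joblib backend.
# Assumes pyspark and joblibspark are installed so that "spark" is registered.
from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested

def add_one(i):  # module-level so the backend can pickle it
    return i + 1

with parallel_backend("spark"):
    assert map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2) == {"a": [2, 3], "b": [4, 5]}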
| 713 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
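# Expected output for input 3 (illustrative; each star is printed as "* "):
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *
# Note the widest row appears twice: floyd() ends with n stars and
# reverse_floyd() immediately prints n stars again.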
| 679 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
__magic_name__ = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
__magic_name__ = {F'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = FunnelTokenizer
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = 2
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<sep>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<cls>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase="##" , **_UpperCAmelCase , ):
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , clean_text=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , wordpieces_prefix=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
__snake_case : Any = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
__snake_case : Tuple = do_lower_case
__snake_case : Optional[Any] = strip_accents
__snake_case : List[Any] = tokenize_chinese_chars
__snake_case : Dict = normalizer_class(**_UpperCAmelCase )
__snake_case : Dict = do_lower_case
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
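# Worked example (illustrative, not in the original file): unlike BERT,
# [CLS] gets its own segment id here (cls_token_type_id = 2):
#   tokens:          [CLS] a1 ... an [SEP] b1 ... bm [SEP]
#   token_type_ids:    2   0  ...  0   0    1  ...  1   1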
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Tuple = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 714 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
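# Worked example (illustrative, not in the original file): each
# `number &= number - 1` clears the lowest set bit, so the loop runs once per
# set bit. For 25 = 0b11001 (three set bits):
#   25 & 24 = 0b11001 & 0b11000 = 0b11000 = 24
#   24 & 23 = 0b11000 & 0b10111 = 0b10000 = 16
#   16 & 15 = 0b10000 & 0b01111 = 0      -> 3 iterations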
| 679 | 0 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : str ):
def get_masked_lm_array(__UpperCAmelCase : str ):
__snake_case : List[Any] = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
if "kernel" in name:
__snake_case : str = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
def get_encoder_array(__UpperCAmelCase : str ):
__snake_case : Any = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[Any] = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
if "kernel" in name:
__snake_case : str = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
def get_encoder_layer_array(__UpperCAmelCase : int , __UpperCAmelCase : str ):
__snake_case : Tuple = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : int = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
if "kernel" in name:
__snake_case : List[Any] = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
def get_encoder_attention_layer_array(__UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Any ):
__snake_case : Any = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : int = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : int = array.reshape(__UpperCAmelCase )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
print(F"""Loading model based on config from {config_path}...""" )
__snake_case : int = BertConfig.from_json_file(__UpperCAmelCase )
__snake_case : Dict = BertForMaskedLM(__UpperCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : Dict = get_encoder_attention_layer_array(
__UpperCAmelCase , '_query_dense/kernel' , self_attn.query.weight.data.shape )
__snake_case : Optional[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_query_dense/bias' , self_attn.query.bias.data.shape )
__snake_case : Optional[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_key_dense/kernel' , self_attn.key.weight.data.shape )
__snake_case : Optional[int] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_key_dense/bias' , self_attn.key.bias.data.shape )
__snake_case : Optional[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_value_dense/kernel' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : List[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_output_dense/kernel' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
__UpperCAmelCase , '_output_dense/bias' , self_output.dense.bias.data.shape )
__snake_case : List[Any] = get_encoder_layer_array(__UpperCAmelCase , '_attention_layer_norm/gamma' )
__snake_case : Dict = get_encoder_layer_array(__UpperCAmelCase , '_attention_layer_norm/beta' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : Any = get_encoder_layer_array(__UpperCAmelCase , '_intermediate_dense/kernel' )
__snake_case : Dict = get_encoder_layer_array(__UpperCAmelCase , '_intermediate_dense/bias' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : Any = get_encoder_layer_array(__UpperCAmelCase , '_output_dense/kernel' )
__snake_case : List[str] = get_encoder_layer_array(__UpperCAmelCase , '_output_dense/bias' )
__snake_case : Dict = get_encoder_layer_array(__UpperCAmelCase , '_output_layer_norm/gamma' )
__snake_case : Tuple = get_encoder_layer_array(__UpperCAmelCase , '_output_layer_norm/beta' )
# Embeddings
__snake_case : Dict = get_encoder_array('_position_embedding_layer/embeddings' )
__snake_case : Dict = get_encoder_array('_type_embedding_layer/embeddings' )
__snake_case : Dict = get_encoder_array('_embedding_norm_layer/gamma' )
__snake_case : Optional[int] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
__snake_case : Tuple = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('dense/kernel' )
__snake_case : Optional[int] = get_masked_lm_array('dense/bias' )
__snake_case : List[Any] = get_masked_lm_array('layer_norm/gamma' )
__snake_case : int = get_masked_lm_array('layer_norm/beta' )
__snake_case : List[Any] = get_masked_lm_array('embedding_table' )
# Pooling
__snake_case : Tuple = BertPooler(config=__UpperCAmelCase )
__snake_case : BertPooler = get_encoder_array('_pooler_layer/kernel' )
__snake_case : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(__UpperCAmelCase )
# Integration test - should load without any errors ;)
__snake_case : Tuple = BertForMaskedLM.from_pretrained(__UpperCAmelCase )
print(new_model.eval() )
print('Model conversion was done successfully!' )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
__magic_name__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
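# Example invocation (illustrative; the script filename is hypothetical):
# python convert_token_dropping_bert_checkpoint.py \
#     --tf_checkpoint_path /path/to/tf_checkpoint \
#     --bert_config_file /path/to/bert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model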
| 715 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
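# Illustrative: with the helper above, running `RUN_SLOW=yes pytest tests/`
# flips the corresponding flag below to True; when the variable is unset, the
# given default is used instead.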
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( test_case ):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(test_case)
    else:
        return test_case
def UpperCAmelCase__( test_case ):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(test_case)
    else:
        return test_case
def UpperCAmelCase__( test_case ):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case)
    return test_case
def UpperCAmelCase__( test_case ):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case)
    return test_case
def UpperCAmelCase__( test_case ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case)
    return test_case
def UpperCAmelCase__( test_case ):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case)
    return test_case
def UpperCAmelCase__( *decorators ):
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('test' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception):  # name recovered from its raise site below; Exception base is inferred
    """simple docstring"""
    pass
import enum  # needed for the enum base below; the original import block sits earlier in this file
class OfflineSimulationMode(enum.Enum):  # name and members recovered from the references and error message below
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def UpperCAmelCase__( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16 ):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs['timeout'] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
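# Hedged usage sketch of the offline simulator above (the context manager's
# original name is obfuscated in this dump; `offline` below is an assumed alias):
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       with pytest.raises(requests.ConnectionError):
#           requests.get('https://huggingface.co')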
@contextmanager
def UpperCAmelCase__( *args , **kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
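# Hedged usage sketch: the two context managers above assert on pyarrow's
# allocator counters. Their original names are obfuscated in this dump; the
# alias below is an assumption:
#
#   with assert_arrow_memory_increases():
#       table = pa.table({'col': [1, 2, 3]})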
def UpperCAmelCase__( rng1 , rng2 ):
    return deepcopy(rng1 ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( func ):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith('500' ) or str(err ).startswith('502' ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
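# Hedged usage sketch: the decorator above turns transient 500/502 server errors
# into expected failures. Its original name is obfuscated in this dump; the alias
# below is an assumption:
#
#   @xfail_if_500_502_http_error
#   def test_hits_the_hub():
#       ...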
class _RunOutput:  # name recovered from its use in the subprocess helpers below
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):  # name recovered from its call sites below
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):  # name recovered from its call site below
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done, and if it hangs there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def UpperCAmelCase__( cmd , env=None , stdin=None , timeout=180 , quiet=True , echo=True ):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
    return result
def pytest_xdist_worker_id( ):  # name recovered from its call site below
    worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    worker = re.sub(r'^gw' , '' , worker , 0 , re.M )
    return int(worker )
def UpperCAmelCase__( ):
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
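# Worked example (hedged): with PYTEST_XDIST_WORKER=gw3, pytest_xdist_worker_id()
# strips the "gw" prefix and returns 3, so the unique port above is 29_500 + 3.
assert 29_500 + int(re.sub(r'^gw' , '' , 'gw3' , 0 , re.M )) == 29_503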
| 679 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 768,
'''430M''': 1_024,
'''1B5''': 2_048,
'''3B''': 2_560,
'''7B''': 4_096,
'''14B''': 5_120,
}
def convert_state_dict( state_dict ):  # name recovered from its call site below
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('emb.' ):
            name = name.replace('emb.' , 'embeddings.' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0' ):
            name = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
        # att -> attention
        name = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , name )
        # ffn -> feed_forward
        name = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , name )
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k' ):
            name = name.replace('.time_mix_k' , '.time_mix_key' )
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v' ):
            name = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r' ):
            name = name.replace('.time_mix_r' , '.time_mix_receptance' )
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
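# Worked example (hedged): a raw RWKV key such as 'blocks.0.att.time_mix_k' is
# rewritten by the rules above to 'rwkv.blocks.0.attention.time_mix_key'.
# The regex step alone:
assert re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , 'blocks.0.att.time_mix_k' ) == 'blocks.0.attention.time_mix_k'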
def convert_rwkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):  # name recovered from the call at the bottom of this script (the 'rmkv' typo there is fixed)
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
        vocab_size = 5_02_77
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
    if size not in possible_sizes:
        raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download the model file, then convert the state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='cpu' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split into shards and save
    shards, index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(index_file , 'w' , encoding='utf-8' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error; if this is the case don\'t worry, you still have converted the model.' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='2GB' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
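# Example invocation (hedged; the repo and file names below are illustrative only):
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf --size 169M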
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class Node(Generic[T]):  # name recovered from the `Node[T]` annotations and the construction in push below
    """simple docstring"""
    def __init__( self , data ):
        self.data = data
        self.next: Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
        self.top: Node[T] | None = None
def __iter__( self ):
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
    def is_empty( self ):  # name recovered from the calls in push/pop/peek below
return self.top is None
    def push( self , data ):  # method name inferred from the standard stack API
        node = Node(data )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ):  # name inferred from the error message below
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
return pop_node.data
    def peek( self ):  # name inferred from the error message below
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
    def clear( self ):  # name inferred; empties the stack
        self.top = None
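# Hedged demo of the linked-list stack above. The stack class's original name is
# obfuscated in this dump, so it is instantiated under its obfuscated name here.
_demo_stack = __SCREAMING_SNAKE_CASE()
_demo_stack.push(1)
_demo_stack.push(2)
assert str(_demo_stack) == '2->1' and _demo_stack.peek() == 2 and _demo_stack.pop() == 2 and len(_demo_stack) == 1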
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 0 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__magic_name__ = get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple=0 ):
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case : Optional[Any] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case : Dict = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__snake_case : List[Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case : Union[str, Any] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__snake_case : Any = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case : Union[str, Any] = os.path.join(__UpperCAmelCase , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
logger.info(F"""Saving model to {ckpt_dir}""" )
__snake_case : Optional[Any] = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
__snake_case : str = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__snake_case : Any = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Loading model from {input_model_file}""" )
__snake_case : List[Any] = torch.load(__UpperCAmelCase )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case : Tuple = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__snake_case : Union[str, Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Loading model from {input_model_file}""" )
__snake_case : Tuple = torch.load(__UpperCAmelCase )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case : Tuple = (
os.path.join(__UpperCAmelCase , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
__snake_case : Union[str, Any] = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case : Union[str, Any] = state_dict['model']
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=0 ):
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case : Tuple = FSDP.optim_state_dict(__UpperCAmelCase , __UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case : Optional[int] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__snake_case : Optional[int] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
__snake_case : str = os.path.join(__UpperCAmelCase , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case : Any = None
            # The check below should work but currently does not (most likely a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case : Tuple = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__snake_case : Dict = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
__snake_case : str = torch.load(__UpperCAmelCase )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
__snake_case : Optional[Any] = (
os.path.join(__UpperCAmelCase , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
__snake_case : str = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ) , )
__snake_case : Union[str, Any] = optim_state['optimizer']
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
__snake_case : Optional[Any] = FSDP.optim_state_dict_to_load(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
optimizer.load_state_dict(__UpperCAmelCase )
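# Hedged sketch of how the four helpers above pair up (their names are obfuscated
# in this dump; the aliases below follow accelerate's public API and are assumptions):
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, input_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir)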
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(  # boolean flags below are inferred; they were obfuscated in this dump
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
        # NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
class __SCREAMING_SNAKE_CASE : # Public class to implement a graph
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i , j , visited ):  # name recovered from the call in diffs below
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
    def diffs( self , i , j , visited ):  # name recovered from the recursive calls below
# Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ):  # And finally, count all islands. (method name inferred from this comment)
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
count += 1
return count
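# Worked example (hedged): diagonal neighbours count under the 8-connectivity used
# above, so the two 1-cells below form a single island. The class's original name
# is obfuscated in this dump, hence the obfuscated constructor.
_demo_graph = __SCREAMING_SNAKE_CASE(3 , 3 , [[1, 0, 0], [0, 1, 0], [0, 0, 0]] )
assert _demo_graph.count_islands() == 1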
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):  # name and parameters recovered from the call at the bottom of this script
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
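# Example invocation (hedged; paths are illustrative only):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5-pytorch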
| 679 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline
__UpperCAmelCase = ["image_embeds", "negative_image_embeds", "image", "hint"]
__UpperCAmelCase = ["image_embeds", "negative_image_embeds", "image", "hint"]
__UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 100
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : List[str] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__snake_case : str = UNetaDConditionModel(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.dummy_unet
__snake_case : Dict = self.dummy_movq
__snake_case : Optional[Any] = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__snake_case : Dict = DDIMScheduler(**_UpperCAmelCase )
__snake_case : int = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
__snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__snake_case : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCAmelCase )
# create init_image
__snake_case : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Any = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((256, 256) )
# create hint
__snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Any = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Dict = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[Any] = 'cpu'
__snake_case : Optional[Any] = self.get_dummy_components()
__snake_case : List[Any] = self.pipeline_class(**_UpperCAmelCase )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : List[Any] = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Optional[Any] = output.images
__snake_case : Dict = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
__snake_case : str = image[0, -3:, -3:, -1]
__snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : List[Any] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
__snake_case : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__snake_case : str = init_image.resize((512, 512) )
__snake_case : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
__snake_case : List[str] = torch.from_numpy(np.array(_UpperCAmelCase ) ).float() / 255.0
__snake_case : Any = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__snake_case : int = 'A robot, 4k photo'
__snake_case : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCAmelCase )
__snake_case : List[str] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
__snake_case : List[str] = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe_prior(
_UpperCAmelCase , image=_UpperCAmelCase , strength=0.85 , generator=_UpperCAmelCase , negative_prompt='' , ).to_tuple()
__snake_case : str = pipeline(
image=_UpperCAmelCase , image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
__snake_case : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):  # name recovered from its use in the model below; BertEncoder base is inferred from the imports
"""simple docstring"""
    def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):  # name and keywords recovered from the call site below
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class BertModelWithPabee(BertModel):  # name recovered from its use in the classification head below; BertModel base is inferred from the imports
"""simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.encoder = BertEncoderWithPabee(config )
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold( self , threshold ):  # method names in this block inferred from the attributes they touch
        self.regression_threshold = threshold
    def set_patience( self , patience ):
        self.patience = patience
    def reset_stats( self ):
        self.inference_layers_num = 0
        self.inference_instances_num = 0
    def log_stats( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):  # parameter names recovered from the references in the body below
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
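    # Worked example (hedged) of the patience rule implemented above: with
    # patience=2 and per-layer argmax predictions [A, B, B, ...], inference stops
    # after the third layer, because two consecutive internal classifiers agreed.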
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):  # class name inferred from the docstring above; it is not referenced elsewhere in this file
    """simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
        logits = self.bert(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__snake_case : List[str] = size if size is not None else {'shortest_edge': 18}
__snake_case : Optional[int] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
__snake_case : Optional[int] = parent
__snake_case : int = batch_size
__snake_case : Optional[Any] = num_channels
__snake_case : Optional[Any] = image_size
__snake_case : int = min_resolution
__snake_case : Tuple = max_resolution
__snake_case : Optional[Any] = do_resize
__snake_case : List[Any] = size
__snake_case : Optional[Any] = do_center_crop
__snake_case : Optional[int] = crop_size
__snake_case : str = do_normalize
__snake_case : Tuple = image_mean
__snake_case : Optional[Any] = image_std
def lowercase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = LevitImageProcessor if is_vision_available() else None
def lowercase_ ( self ):
__snake_case : List[Any] = LevitImageProcessingTester(self )
@property
def lowercase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
def lowercase_ ( self ):
__snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__snake_case : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase_ ( self ):
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__snake_case : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : List[Any] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase_ ( self ):
# Initialize image_processing
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def is_isogram( string : str ):  # function and parameter names recovered from the references below
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
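# Worked examples (hedged): 'Uncopyrightable' has no repeated letter, while
# 'letter' repeats both 'e' and 't'.
assert is_isogram('Uncopyrightable' )
assert not is_isogram('letter' )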
if __name__ == "__main__":
    input_str = input('''Enter a string ''').strip()
    isogram = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "wav2vec2"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="sum" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1_500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
__snake_case : List[str] = hidden_size
__snake_case : int = feat_extract_norm
__snake_case : str = feat_extract_activation
__snake_case : Union[str, Any] = list(_UpperCAmelCase )
__snake_case : Optional[Any] = list(_UpperCAmelCase )
__snake_case : Dict = list(_UpperCAmelCase )
__snake_case : Any = conv_bias
__snake_case : List[Any] = num_conv_pos_embeddings
__snake_case : Union[str, Any] = num_conv_pos_embedding_groups
__snake_case : int = len(self.conv_dim )
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : List[str] = intermediate_size
__snake_case : List[Any] = hidden_act
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Tuple = hidden_dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : str = activation_dropout
__snake_case : Optional[int] = feat_proj_dropout
__snake_case : Optional[int] = final_dropout
__snake_case : str = layerdrop
__snake_case : List[Any] = layer_norm_eps
__snake_case : Dict = initializer_range
__snake_case : str = vocab_size
__snake_case : Dict = do_stable_layer_norm
__snake_case : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : str = apply_spec_augment
__snake_case : Tuple = mask_time_prob
__snake_case : int = mask_time_length
__snake_case : List[str] = mask_time_min_masks
__snake_case : List[str] = mask_feature_prob
__snake_case : Union[str, Any] = mask_feature_length
__snake_case : Optional[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case : Optional[int] = num_codevectors_per_group
__snake_case : Any = num_codevector_groups
__snake_case : str = contrastive_logits_temperature
__snake_case : List[Any] = feat_quantizer_dropout
__snake_case : int = num_negatives
__snake_case : Dict = codevector_dim
__snake_case : Tuple = proj_codevector_dim
__snake_case : Optional[Any] = diversity_loss_weight
# ctc loss
__snake_case : str = ctc_loss_reduction
__snake_case : List[str] = ctc_zero_infinity
# adapter
__snake_case : Optional[Any] = add_adapter
__snake_case : Dict = adapter_kernel_size
__snake_case : List[str] = adapter_stride
__snake_case : Optional[int] = num_adapter_layers
__snake_case : Any = output_hidden_size or hidden_size
__snake_case : Union[str, Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case : Dict = list(_UpperCAmelCase )
__snake_case : Optional[int] = list(_UpperCAmelCase )
__snake_case : Optional[Any] = list(_UpperCAmelCase )
__snake_case : Any = xvector_output_dim
@property
def lowercase_ ( self ):
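        # the product of all conv strides is the overall downsampling factor from
        # raw audio samples to encoder frames (`inputs_to_logits_ratio` upstream)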
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
__magic_name__ = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__magic_name__ = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__magic_name__ = os.environ.get('''USER_TOKEN''', '''''')
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Any = {
'Authorization': F"""token {auth_token}""",
'Accept': 'application/vnd.github.v3+json',
}
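    # "token <PAT>" is GitHub's header scheme for personal access tokens; the
    # Accept header pins the v3 REST API media type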
return requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__magic_name__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__magic_name__ = parser.parse_args()
__magic_name__ = '''cpu'''
__magic_name__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__magic_name__ = '''path-to-your-trained-model'''
__magic_name__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__magic_name__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__magic_name__ = pipe.to(device)
# to channels last
__magic_name__ = pipe.unet.to(memory_format=torch.channels_last)
__magic_name__ = pipe.vae.to(memory_format=torch.channels_last)
__magic_name__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__magic_name__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__magic_name__ = torch.randn(2, 4, 64, 64)
__magic_name__ = torch.rand(1) * 999
__magic_name__ = torch.randn(2, 77, 768)
__magic_name__ = (sample, timestep, encoder_hidden_status)
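# dummy tensors matching the UNet forward signature (latent sample, timestep,
# text-encoder hidden states), passed as `sample_input` so ipex.optimize can
# specialize the graph for these shapes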
try:
__magic_name__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__magic_name__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__magic_name__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__magic_name__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__magic_name__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__magic_name__ = 666
__magic_name__ = torch.Generator(device).manual_seed(seed)
__magic_name__ = {'''generator''': generator}
if args.steps is not None:
__magic_name__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__magic_name__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 702 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
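        # Euclid's algorithm: the loop below finds the GCD of numerator and
        # denominator, which is then divided out to reduce the fraction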
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 0 |
from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y] # f = g + h: cost so far plus the heuristic estimate to the goal
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
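            # after sorting ascending and reversing, pop() removes the open cell with the smallest f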
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
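        # tokenizer.pad expects flat feature dicts, so the (batch, num_choices)
        # structure is flattened here and restored after padding below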
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length`'
                ' value of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length`'
                ' you can override this default with `--max_seq_length xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
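        # each SWAG example pairs one context with four candidate endings, so the
        # context is repeated once per ending before tokenization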
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
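        # argmax over the four choice logits selects the predicted ending for each example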
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__magic_name__ = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__magic_name__ = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well, which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
__magic_name__ = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If set to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
    \'word_order\' (int): The word n-gram order. If set to 2, the metric is referred to as chrF++,
    \'beta\' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def lowercase_ ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = CHRF.CHAR_ORDER , _UpperCAmelCase = CHRF.WORD_ORDER , _UpperCAmelCase = CHRF.BETA , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , ):
__snake_case : Tuple = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__snake_case : Optional[Any] = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
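        # sacrebleu groups references by position across all predictions, hence the transpose above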
__snake_case : int = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
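        # the target-language code becomes the prefix token prepended to every encoded sequence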
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 0 |
'''simple docstring'''
__magic_name__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__magic_name__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__magic_name__ = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ):
assert len(str(__UpperCAmelCase ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
__snake_case : List[str] = year // 1_00
__snake_case : str = (5 * (century % 4) + 2) % 7
__snake_case : str = year % 1_00
__snake_case : List[str] = centurian % 12
__snake_case : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__snake_case : Optional[int] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
__snake_case : Dict = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
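    # Hedged demo (added): 2020-10-24 fell on a Saturday. The week-day function is
    # obfuscated above as `UpperCAmelCase__`, and the doomsday tables as
    # `__magic_name__`, so this mirrors the original module's usage.
    print(UpperCAmelCase__(2_020, 10, 24))  # expected: 'Saturday'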
| 705 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
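    # Hedged extra check (added; the print above already uses the de-obfuscated
    # name `odd_even_transposition`): duplicate values are kept and ordered.
    print(F'''With duplicates: {odd_even_transposition([3, 1, 3, 2])}''')  # [1, 2, 3, 3]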
| 679 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
__magic_name__ = TypeVar('''U''')
class __SCREAMING_SNAKE_CASE ( Generic[T, U]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = key
__snake_case : Optional[Any] = val
__snake_case : DoubleLinkedListNode[T, U] | None = None
__snake_case : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class __SCREAMING_SNAKE_CASE ( Generic[T, U]):
"""simple docstring"""
def __init__( self ):
__snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = self.rear, self.head
def __repr__( self ):
__snake_case : Optional[Any] = ['DoubleLinkedList']
__snake_case : Tuple = self.head
while node.next is not None:
rep.append(str(_UpperCAmelCase ) )
__snake_case : List[Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__snake_case : str = node
__snake_case : Tuple = previous
__snake_case : Any = node
__snake_case : Optional[Any] = self.rear
def lowercase_ ( self , _UpperCAmelCase ):
if node.prev is None or node.next is None:
return None
__snake_case : str = node.next
__snake_case : str = node.prev
__snake_case : Optional[int] = None
__snake_case : str = None
return node
class __SCREAMING_SNAKE_CASE ( Generic[T, U]):
"""simple docstring"""
__UpperCAmelCase = {}
def __init__( self , _UpperCAmelCase ):
__snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
__snake_case : List[Any] = capacity
__snake_case : int = 0
__snake_case : Optional[Any] = 0
__snake_case : Tuple = 0
__snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , _UpperCAmelCase ):
return key in self.cache
def lowercase_ ( self , _UpperCAmelCase ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
__snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
__snake_case : List[str] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_UpperCAmelCase )
return node.val
self.miss += 1
return None
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__snake_case : List[str] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_UpperCAmelCase ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__snake_case : Optional[int] = DoubleLinkedListNode(_UpperCAmelCase , _UpperCAmelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__snake_case : List[str] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__snake_case : Union[str, Any] = value
self.list.add(_UpperCAmelCase )
@classmethod
def lowercase_ ( cls , _UpperCAmelCase = 128 ):
def cache_decorator_inner(_UpperCAmelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*_UpperCAmelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
__snake_case : List[str] = LRUCache(_UpperCAmelCase )
__snake_case : List[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__snake_case : List[Any] = func(*_UpperCAmelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , _UpperCAmelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_UpperCAmelCase , 'cache_info' , _UpperCAmelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
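    # Hedged usage sketch (added), assuming the de-obfuscated names of the original
    # implementation (`LRUCache.decorator`, obfuscated above as
    # `__SCREAMING_SNAKE_CASE.lowercase_`):
    #
    #     @LRUCache.decorator(100)
    #     def fib(num: int) -> int:
    #         if num in (1, 2):
    #             return 1
    #         return fib(num - 1) + fib(num - 2)
    #
    #     fib(20)            # 6765, with repeated subproblems served from the cache
    #     fib.cache_info()   # CacheInfo(hits=..., misses=..., capacity=100, ...)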
| 706 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
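    # Hedged note (added): Perceiver tokenizes raw UTF-8 bytes with an offset of 6
    # reserved special-token ids (e.g. [CLS]=4, [SEP]=5), so byte 85 ('U') maps to
    # id 91 and byte 46 ('.') to id 52 in the expected id lists above.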
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase_ ( self ):
__snake_case : Any = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained('google/mt5-small' )
__snake_case : Optional[int] = tokenizer('Hello there' , return_tensors='tf' ).input_ids
__snake_case : Tuple = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
__snake_case : List[str] = model(_UpperCAmelCase , labels=_UpperCAmelCase ).loss
__snake_case : int = -tf.math.reduce_mean(_UpperCAmelCase ).numpy()
__snake_case : Any = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
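        # Hedged note (added): the check above reduces the model's returned
        # per-token loss to a single mean, negates it, and requires the result to
        # match the hard-coded EXPECTED_SCORE to within 2e-4.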
| 707 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
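# Hedged usage sketch (added), assuming the public `datasets` names for the
# feature classes obfuscated above. Following the encode logic in `lowercase_`,
# translations are flattened and sorted lexicographically by (language, text):
#
#     from datasets.features import TranslationVariableLanguages
#     feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#     feature.encode_example(
#         {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
#     )
#     # {'language': ('de', 'en', 'fr', 'fr'),
#     #  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}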
| 679 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__magic_name__ = logging.get_logger(__name__)
# General docstring
__magic_name__ = '''ResNetConfig'''
# Base docstring
__magic_name__ = '''microsoft/resnet-50'''
__magic_name__ = [1, 2_048, 7, 7]
# Image classification docstring
__magic_name__ = '''microsoft/resnet-50'''
__magic_name__ = '''tiger cat'''
__magic_name__ = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
__snake_case : Tuple = nn.Convad(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , bias=_UpperCAmelCase )
__snake_case : Tuple = nn.BatchNormad(_UpperCAmelCase )
__snake_case : List[str] = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Union[str, Any] = self.convolution(_UpperCAmelCase )
__snake_case : Any = self.normalization(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.activation(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
__snake_case : Dict = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__snake_case : Union[str, Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__snake_case : Optional[int] = config.num_channels
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__snake_case : int = self.embedder(_UpperCAmelCase )
__snake_case : Optional[int] = self.pooler(_UpperCAmelCase )
return embedding
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 ):
super().__init__()
__snake_case : Union[str, Any] = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase )
__snake_case : List[Any] = nn.BatchNormad(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.convolution(_UpperCAmelCase )
__snake_case : int = self.normalization(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
__snake_case : List[Any] = in_channels != out_channels or stride != 1
__snake_case : Any = (
ResNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__snake_case : Optional[int] = nn.Sequential(
ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) , ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , activation=_UpperCAmelCase ) , )
__snake_case : List[str] = ACTaFN[activation]
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = hidden_state
__snake_case : Optional[Any] = self.layer(_UpperCAmelCase )
__snake_case : Optional[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__snake_case : Optional[Any] = self.activation(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , _UpperCAmelCase = 4 ):
super().__init__()
__snake_case : Union[str, Any] = in_channels != out_channels or stride != 1
__snake_case : int = out_channels // reduction
__snake_case : List[Any] = (
ResNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__snake_case : str = nn.Sequential(
ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) , ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
__snake_case : Union[str, Any] = ACTaFN[activation]
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = hidden_state
__snake_case : int = self.layer(_UpperCAmelCase )
__snake_case : List[str] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__snake_case : List[str] = self.activation(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , ):
super().__init__()
__snake_case : Union[str, Any] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
__snake_case : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , activation=config.hidden_act ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = input
for layer in self.layers:
__snake_case : int = layer(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
__snake_case : int = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ):
__snake_case : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case : List[str] = hidden_states + (hidden_state,)
__snake_case : Any = stage_module(_UpperCAmelCase )
if output_hidden_states:
__snake_case : Any = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ResNetConfig
__UpperCAmelCase = "resnet"
__UpperCAmelCase = "pixel_values"
__UpperCAmelCase = True
def lowercase_ ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = value
__magic_name__ = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__magic_name__ = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config
__snake_case : List[Any] = ResNetEmbeddings(_UpperCAmelCase )
__snake_case : Optional[int] = ResNetEncoder(_UpperCAmelCase )
__snake_case : List[Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
__snake_case : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : List[str] = self.embedder(_UpperCAmelCase )
__snake_case : Optional[Any] = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__snake_case : Optional[int] = encoder_outputs[0]
__snake_case : str = self.pooler(_UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : List[str] = ResNetModel(_UpperCAmelCase )
# classification head
__snake_case : List[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase_ ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
__snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Optional[Any] = self.resnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__snake_case : str = outputs.pooler_output if return_dict else outputs[1]
__snake_case : Union[str, Any] = self.classifier(_UpperCAmelCase )
__snake_case : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case : Optional[Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case : Optional[int] = 'single_label_classification'
else:
__snake_case : List[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case : int = MSELoss()
if self.num_labels == 1:
__snake_case : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case : List[str] = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
__snake_case : str = CrossEntropyLoss()
__snake_case : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case : Any = BCEWithLogitsLoss()
__snake_case : Dict = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
__snake_case : List[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
super()._init_backbone(_UpperCAmelCase )
__snake_case : List[str] = [config.embedding_size] + config.hidden_sizes
__snake_case : Optional[Any] = ResNetEmbeddings(_UpperCAmelCase )
__snake_case : List[str] = ResNetEncoder(_UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@replace_return_docstrings(output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
__snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : List[Any] = self.embedder(_UpperCAmelCase )
__snake_case : str = self.encoder(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__snake_case : Tuple = outputs.hidden_states
__snake_case : Optional[int] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__snake_case : int = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_UpperCAmelCase , )
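# Hedged usage sketch (added), assuming the de-obfuscated public names from
# `transformers` for the model classes defined above:
#
#     import torch
#     from transformers import ResNetConfig, ResNetForImageClassification
#
#     config = ResNetConfig(num_labels=10)
#     model = ResNetForImageClassification(config)
#     pixel_values = torch.randn(2, 3, 224, 224)
#     logits = model(pixel_values).logits  # shape: (2, 10)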
| 708 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y] # f = g + h: path cost so far plus heuristic estimate to the goal
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
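# Hedged note (added): the open list above always expands the cell with the
# smallest f = g + h (path cost so far plus heuristic estimate). The returned
# `action` grid records, for each visited cell, the DIRECTIONS index used to
# reach it, which is exactly what the back-tracking loop consumes to rebuild
# the path.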
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
            __magic_name__ = abs(i - goal[0]) + abs(j - goal[1])  # Manhattan distance to the goal
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["image_processor", "tokenizer"]
__UpperCAmelCase = "BlipImageProcessor"
__UpperCAmelCase = "AutoTokenizer"
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = False
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Tuple = self.image_processor
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__snake_case : Union[str, Any] = self.tokenizer
__snake_case : Optional[int] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
return text_encoding
# add pixel_values
__snake_case : Optional[Any] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
if text is not None:
__snake_case : int = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
else:
__snake_case : List[Any] = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCAmelCase )
return encoding_image_processor
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.tokenizer.model_input_names
__snake_case : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
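# Hedged usage sketch (added), assuming the public `transformers` API this
# processor mirrors (`Blip2Processor`; `image` is a placeholder for any PIL
# image):
#
#     from transformers import Blip2Processor
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> pixel_values plus the tokenized text fields, merged into one encoding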
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
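# Hedged usage sketch (added), assuming the de-obfuscated `transformers` names
# for the config classes defined above:
#
#     from transformers import InstructBlipConfig
#     config = InstructBlipConfig()        # all three sub-configs use defaults
#     config.num_query_tokens              # 32
#     config.vision_config.hidden_size     # 1408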
| 679 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__magic_name__ = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__magic_name__ = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the reference and hypothesis lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__magic_name__ = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def lowercase_ ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , ):
__snake_case : List[Any] = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__snake_case : Optional[int] = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
__snake_case : Union[str, Any] = TER(
normalized=_UpperCAmelCase , no_punct=_UpperCAmelCase , asian_support=_UpperCAmelCase , case_sensitive=_UpperCAmelCase , )
__snake_case : Union[str, Any] = sb_ter.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 710 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 0 |
import numpy
# List of input, output pairs
__magic_name__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__magic_name__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
__magic_name__ = [2, 4, 1, 5]
__magic_name__ = len(train_data)
__magic_name__ = 0.009
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any]="train" ):
return calculate_hypothesis_value(__UpperCAmelCase , __UpperCAmelCase ) - output(
__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
__snake_case : Optional[int] = 0
for i in range(len(__UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]=m ):
__snake_case : Optional[Any] = 0
for i in range(__UpperCAmelCase ):
if index == -1:
summation_value += _error(__UpperCAmelCase )
else:
summation_value += _error(__UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
__snake_case : List[str] = summation_of_cost_derivative(__UpperCAmelCase , __UpperCAmelCase ) / m
return cost_derivative_value
def UpperCAmelCase__( ):
global parameter_vector
    # Tune this value to set a tolerance for the predicted output
__snake_case : Tuple = 0.000002
__snake_case : str = 0
__snake_case : List[str] = 0
while True:
j += 1
__snake_case : List[str] = [0, 0, 0, 0]
for i in range(0 , len(__UpperCAmelCase ) ):
__snake_case : Optional[int] = get_cost_derivative(i - 1 )
__snake_case : Any = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__UpperCAmelCase , __UpperCAmelCase , atol=__UpperCAmelCase , rtol=__UpperCAmelCase , ):
break
__snake_case : Tuple = temp_parameter_vector
print(('Number of iterations:', j) )
def UpperCAmelCase__( ):
for i in range(len(__UpperCAmelCase ) ):
print(('Actual output value:', output(__UpperCAmelCase , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(__UpperCAmelCase , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
    test_gradient_descent()
| 711 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 0 |
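# A hedged worked example of the linear hypothesis used by the gradient
# descent snippet above: h(x) = p[0] + p[1]*x1 + p[2]*x2 + p[3]*x3, where
# p[0] is the bias term. The names here are local to this sketch.
p = [2, 4, 1, 5]
x = (5, 2, 3)
hypothesis = p[0] + sum(pi * xi for pi, xi in zip(p[1:], x))
assert hypothesis == 2 + 4 * 5 + 1 * 2 + 5 * 3 == 39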
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''T5Config'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
| 712 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 0 |
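# A hedged sanity check for the substring-divisibility test above, using the
# classic Project Euler 43 pandigital 1406357289: each 3-digit window
# d(i+2)d(i+3)d(i+4) (0-indexed) must be divisible by the i-th prime in
# (2, 3, 5, 7, 11, 13, 17). Names are local to this sketch.
num = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)
primes = (2, 3, 5, 7, 11, 13, 17)
windows = [int("".join(map(str, num[i + 1 : i + 4]))) for i in range(7)]
assert windows == [406, 63, 635, 357, 572, 728, 289]
assert all(w % p == 0 for w, p in zip(windows, primes))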
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
    __magic_name__ = int(input('''enter the number, and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 0 |
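# A hedged, simplified version of the deferred-import pattern behind the
# `_LazyModule` assignment above: attribute access triggers the real import.
# `TinyLazyModule` is a toy stand-in for illustration only, not transformers'
# actual `_LazyModule` API.
import importlib


class TinyLazyModule:
    """Resolve attributes to objects in other modules only on first access."""

    def __init__(self, name_to_module):
        self._map = name_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])  # imported lazily
        return getattr(module, name)


lazy = TinyLazyModule({"sqrt": "math"})
assert lazy.sqrt(9) == 3.0  # "math" is only imported at this call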
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : List[Any]=False ):
__snake_case : List[Any] = OmegaConf.load(__UpperCAmelCase )
if display:
print(yaml.dump(OmegaConf.to_container(__UpperCAmelCase ) ) )
return config
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Dict=None ):
if conf_path is None:
__snake_case : Optional[int] = './model_checkpoints/vqgan_only.yaml'
__snake_case : List[Any] = load_config(__UpperCAmelCase , display=__UpperCAmelCase )
__snake_case : Tuple = VQModel(**config.model.params )
if ckpt_path is None:
__snake_case : Dict = './model_checkpoints/vqgan_only.pt'
__snake_case : Optional[Any] = torch.load(__UpperCAmelCase , map_location=__UpperCAmelCase )
if ".ckpt" in ckpt_path:
__snake_case : Tuple = sd['state_dict']
model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
model.to(__UpperCAmelCase )
del sd
return model
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ):
__snake_case : Optional[Any] = model.encode(__UpperCAmelCase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
__snake_case : List[Any] = model.decode(__UpperCAmelCase )
return xrec
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]=False ):
__snake_case : List[str] = string.rsplit('.' , 1 )
if reload:
__snake_case : Dict = importlib.import_module(__UpperCAmelCase )
importlib.reload(__UpperCAmelCase )
return getattr(importlib.import_module(__UpperCAmelCase , package=__UpperCAmelCase ) , cls )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if "target" not in config:
raise KeyError('Expected key `target` to instantiate.' )
return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Any=True ):
__snake_case : Union[str, Any] = instantiate_from_config(__UpperCAmelCase )
if sd is not None:
model.load_state_dict(__UpperCAmelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
# load the specified checkpoint
if ckpt:
__snake_case : List[str] = torch.load(__UpperCAmelCase , map_location='cpu' )
__snake_case : Optional[Any] = pl_sd['global_step']
print(F"""loaded model from global step {global_step}.""" )
else:
__snake_case : str = {'state_dict': None}
__snake_case : List[Any] = None
__snake_case : str = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=__UpperCAmelCase , eval_mode=__UpperCAmelCase )['model']
return model, global_step
| 714 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 0 |
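# A hedged walk-through of Brian Kernighan's trick used above: n &= n - 1
# clears the lowest set bit, so the loop runs exactly popcount(n) times,
# while the modulo variant inspects every bit position instead.
n = 25  # 0b11001 -> three set bits
states = []
while n:
    n &= n - 1
    states.append(bin(n))
assert states == ["0b11000", "0b10000", "0b0"]
assert len(states) == 3  # one iteration per set bit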
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __SCREAMING_SNAKE_CASE ( tf.keras.optimizers.schedules.LearningRateSchedule):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1.0 , _UpperCAmelCase = None , ):
super().__init__()
__snake_case : Union[str, Any] = initial_learning_rate
__snake_case : List[Any] = warmup_steps
__snake_case : Tuple = power
__snake_case : Any = decay_schedule_fn
__snake_case : str = name
def __call__( self , _UpperCAmelCase ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup, i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step / num_warmup_steps * init_lr`.
__snake_case : int = tf.cast(_UpperCAmelCase , tf.floataa )
__snake_case : Union[str, Any] = tf.cast(self.warmup_steps , tf.floataa )
__snake_case : Optional[Any] = global_step_float / warmup_steps_float
__snake_case : Optional[int] = self.initial_learning_rate * tf.math.pow(_UpperCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_UpperCAmelCase , )
def lowercase_ ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : float = 0.9 , __UpperCAmelCase : float = 0.999 , __UpperCAmelCase : float = 1E-8 , __UpperCAmelCase : Optional[float] = None , __UpperCAmelCase : Optional[float] = None , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : Optional[List[str]] = None , ):
__snake_case : Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__UpperCAmelCase , )
if num_warmup_steps:
__snake_case : str = WarmUp(
initial_learning_rate=__UpperCAmelCase , decay_schedule_fn=__UpperCAmelCase , warmup_steps=__UpperCAmelCase , )
if weight_decay_rate > 0.0:
__snake_case : Union[str, Any] = AdamWeightDecay(
learning_rate=__UpperCAmelCase , weight_decay_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=__UpperCAmelCase , )
else:
__snake_case : Tuple = tf.keras.optimizers.Adam(
learning_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase = 0.001 , _UpperCAmelCase = 0.9 , _UpperCAmelCase = 0.999 , _UpperCAmelCase = 1E-7 , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "AdamWeightDecay" , **_UpperCAmelCase , ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__snake_case : List[Any] = weight_decay_rate
__snake_case : List[Any] = include_in_weight_decay
__snake_case : List[str] = exclude_from_weight_decay
@classmethod
def lowercase_ ( cls , _UpperCAmelCase ):
__snake_case : int = {'WarmUp': WarmUp}
return super(_UpperCAmelCase , cls ).from_config(_UpperCAmelCase , custom_objects=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
super(_UpperCAmelCase , self )._prepare_local(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
__snake_case : int = list(zip(*_UpperCAmelCase ) )
return super(_UpperCAmelCase , self ).apply_gradients(zip(_UpperCAmelCase , _UpperCAmelCase ) , name=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__snake_case : str = apply_state or {}
__snake_case : List[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__snake_case : Tuple = self._fallback_apply_state(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , _UpperCAmelCase )
__snake_case : Union[str, Any] = self._decay_weights_op(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(_UpperCAmelCase , self )._resource_apply_dense(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , _UpperCAmelCase )
__snake_case : Optional[int] = self._decay_weights_op(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(_UpperCAmelCase , self )._resource_apply_sparse(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : str = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowercase_ ( self , _UpperCAmelCase ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_UpperCAmelCase , _UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_UpperCAmelCase , _UpperCAmelCase ) is not None:
return False
return True
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self ):
__snake_case : Dict = []
__snake_case : List[str] = None
@property
def lowercase_ ( self ):
if self._accum_steps is None:
__snake_case : List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowercase_ ( self ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , _UpperCAmelCase ):
if not self._gradients:
__snake_case : List[str] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_UpperCAmelCase ) , trainable=_UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(_UpperCAmelCase )}""" )
for accum_gradient, gradient in zip(self._gradients , _UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_UpperCAmelCase )
self._accum_steps.assign_add(1 )
def lowercase_ ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_UpperCAmelCase ) )
| 715 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until it's done and, if it hangs, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 0 |
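# A hedged numeric check of the WarmUp schedule defined above with power=1.0:
# for step < warmup_steps the learning rate is init_lr * step / warmup_steps,
# after which the wrapped decay schedule takes over. Plain-float arithmetic
# stands in for the tf ops here.
initial_learning_rate, warmup_steps, power = 1e-3, 100, 1.0
step = 25
warmup_lr = initial_learning_rate * (step / warmup_steps) ** power
assert abs(warmup_lr - 2.5e-4) < 1e-15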
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 0 |
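# A hedged sketch of the LIFO behaviour the linked-list Stack above encodes
# (its push/pop/peek methods were all renamed to `lowercase_` by the style
# transform, so a plain list stands in here to show the intent).
stack = []
for item in (1, 2, 3):
    stack.append(item)   # push: new top on the right
assert stack[-1] == 3    # peek: last pushed item
assert stack.pop() == 3  # pop: removes in reverse push order
assert stack == [1, 2]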
'''simple docstring'''
import enum
import shutil
import sys
__magic_name__ , __magic_name__ = shutil.get_terminal_size()
__magic_name__ = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class __SCREAMING_SNAKE_CASE ( enum.Enum):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]="" ):
sys.stdout.write(str(__UpperCAmelCase ) + end )
sys.stdout.flush()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]="" ):
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , __UpperCAmelCase )
def UpperCAmelCase__( ):
forceWrite('\r' )
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : str ):
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def UpperCAmelCase__( ):
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__( ):
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
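# A hedged illustration of the ANSI escape sequences behind the cursor
# helpers above: "\033[<n><letter>" moves the cursor, with the letter taken
# from CURSOR_TO_CHAR ('A'=up, 'B'=down, 'C'=right, 'D'=left). Names are
# local to this sketch.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


def move_cursor(num_lines, direction):
    return f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}"


assert move_cursor(2, "up") == "\033[2A"
assert move_cursor(1, "left") == "\033[1D"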
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCAmelCase__( __UpperCAmelCase : Tuple ) -> Dict:
__snake_case : Tuple = os.path.join(args.tf_model_dir , 'parameters.json' )
__snake_case : Optional[Any] = json.loads(open(__UpperCAmelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('.pt' ):
__snake_case : Optional[Any] = args.output + '.pt'
__snake_case : List[Any] = OrderedDict()
with tf.device('/CPU:0' ):
__snake_case : Tuple = tf.train.load_checkpoint(args.tf_model_dir )
__snake_case : Union[str, Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
__snake_case : Union[str, Any] = reader.get_tensor(__UpperCAmelCase ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
__snake_case : Optional[Any] = int(key_name[9] )
elif key_name.startswith('pasts/out' ):
__snake_case : Dict = 8
                __snake_case : Optional[Any] = 'model.sqout.%d.weight' % (player * 2) # entries go into an nn.Sequential with Tanh, so 2 at a time
__snake_case : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__snake_case : Optional[int] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith('model/moe' ):
__snake_case : Optional[int] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
__snake_case : str = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
__snake_case : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__snake_case : Optional[int] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/softmlp/kernel' ):
__snake_case : List[str] = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
__snake_case : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__snake_case : Optional[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
__snake_case : Tuple = key_name[-9:-7]
for i in range(16 ):
__snake_case : Union[str, Any] = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
__snake_case : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
__snake_case : Tuple = torch.tensor(__UpperCAmelCase )
elif key_name.startswith('model/mlp' ):
__snake_case : int = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
__snake_case : int = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
__snake_case : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__snake_case : Any = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/p1/bias' ):
__snake_case : Any = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
__snake_case : str = vnp.copy() # same because it is one dimensional
__snake_case : str = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/p2/kernel' ):
__snake_case : Any = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
__snake_case : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__snake_case : Optional[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/p2/bias' ):
__snake_case : str = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
__snake_case : int = vnp.copy() # same because it is one dimensional
__snake_case : Optional[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith('model/ln' ):
__snake_case : str = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
__snake_case : Any = 'model.blocks.%d.feed_forward.norm.bias' % player
__snake_case : str = vnp.copy() # same because it is one dimensional
__snake_case : List[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/g' ):
__snake_case : Union[str, Any] = 'model.blocks.%d.feed_forward.norm.weight' % player
__snake_case : List[str] = vnp.copy() # same because it is one dimensional
__snake_case : List[str] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith('model/att' ):
__snake_case : Optional[int] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
__snake_case : List[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
__snake_case : str = state[:, 0, :, :]
__snake_case : Tuple = state[:, 1, :, :]
__snake_case : int = state[:, 2, :, :]
__snake_case : Any = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__snake_case : Union[str, Any] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__snake_case : Union[str, Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__snake_case : Optional[int] = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
__snake_case : str = torch.tensor(__UpperCAmelCase )
__snake_case : List[str] = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
__snake_case : Union[str, Any] = torch.tensor(__UpperCAmelCase )
__snake_case : Optional[Any] = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
__snake_case : Tuple = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/o/kernel' ):
__snake_case : Dict = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
__snake_case : Union[str, Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
__snake_case : str = torch.tensor(__UpperCAmelCase )
elif key_name.startswith('model/an' ):
__snake_case : str = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
__snake_case : List[Any] = 'model.blocks.%d.self_attn.norm.bias' % player
__snake_case : Optional[Any] = vnp.copy() # same because it is one dimensional
__snake_case : List[str] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith('/g' ):
__snake_case : str = 'model.blocks.%d.self_attn.norm.weight' % player
__snake_case : Optional[int] = vnp.copy() # same because it is one dimensional
__snake_case : Dict = torch.tensor(__UpperCAmelCase )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
__snake_case : str = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
__snake_case : Dict = 'model.%s.weight' % nlayer
__snake_case : Union[str, Any] = vnp.copy() # same in embedded
__snake_case : Tuple = torch.tensor(__UpperCAmelCase )
if key_name.startswith('model/wte' ):
__snake_case : Union[str, Any] = 'lm_head.weight'
__snake_case : List[str] = vnp.copy() # same in embedded
__snake_case : Optional[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith('model/wob' ):
__snake_case : Union[str, Any] = 'final_logits_bias'
__snake_case : Optional[int] = vnp.copy() # same in embedded
__snake_case : Tuple = state.reshape((1, -1) )
__snake_case : List[Any] = torch.tensor(__UpperCAmelCase )
elif key_name == "model/dense/kernel":
__snake_case : List[str] = 'model.last_project.weight'
__snake_case : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__snake_case : Dict = torch.tensor(__UpperCAmelCase )
elif key_name == "model/dense_1/bias":
__snake_case : Optional[int] = 'model.last_project.bias'
__snake_case : Union[str, Any] = vnp.copy() # same because it is one dimensional
__snake_case : Any = torch.tensor(__UpperCAmelCase )
torch.save(__UpperCAmelCase , args.output )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
__magic_name__ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 0 |
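# A hedged numpy illustration of the conversion step repeated throughout the
# GPTSAN converter above: TF dense kernels are stored (in_features,
# out_features), while torch.nn.Linear.weight expects (out_features,
# in_features), hence the recurring vnp.transpose([1, 0]).copy() before
# torch.tensor(...). Names are local to this sketch.
import numpy as np

tf_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # (in=2, out=3)
pt_weight = tf_kernel.transpose([1, 0]).copy()            # (out=3, in=2)
assert pt_weight.shape == (3, 2)
assert pt_weight[0, 1] == tf_kernel[1, 0]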
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : str = size if size is not None else {'shortest_edge': 224}
__snake_case : List[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__snake_case : List[str] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__snake_case : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='crop_size' )
__snake_case : Optional[Any] = do_resize
__snake_case : Dict = size
__snake_case : Optional[int] = resample
__snake_case : Union[str, Any] = do_center_crop
__snake_case : Optional[Any] = crop_size
__snake_case : List[str] = do_rescale
__snake_case : Optional[Any] = rescale_factor
__snake_case : Union[str, Any] = do_normalize
__snake_case : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case : Optional[int] = do_convert_rgb
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__snake_case : Tuple = get_resize_output_image_size(_UpperCAmelCase , size=size['shortest_edge'] , default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : str = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__snake_case : str = size if size is not None else self.size
__snake_case : Optional[int] = get_size_dict(_UpperCAmelCase , param_name='size' , default_to_square=_UpperCAmelCase )
__snake_case : Tuple = resample if resample is not None else self.resample
__snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__snake_case : int = get_size_dict(_UpperCAmelCase , param_name='crop_size' , default_to_square=_UpperCAmelCase )
__snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : List[Any] = image_mean if image_mean is not None else self.image_mean
__snake_case : Any = image_std if image_std is not None else self.image_std
__snake_case : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case : Union[str, Any] = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case : Optional[int] = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__snake_case : Dict = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__snake_case : List[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
__snake_case : Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
if do_rescale:
__snake_case : Optional[int] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__snake_case : Any = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__snake_case : List[str] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__snake_case : Dict = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
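# --- usage sketch -------------------------------------------------------------
# The class above mirrors the CLIP-style image processor from `transformers`.
# A minimal sketch of the equivalent call with the public `CLIPImageProcessor`
# on a synthetic image (an assumption about the intended export, since the
# names in this file are machine-renamed):
def _example_preprocess_dummy_image():
    import numpy as np
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor(
        size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
    )
    image = (np.random.rand(256, 320, 3) * 255).astype(np.uint8)  # dummy HWC uint8 image
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)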
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
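# --- the patience rule in isolation --------------------------------------------
# The inference branch above exits once `patience` consecutive internal
# classifiers agree on the argmax prediction. A dependency-free sketch of that
# stopping rule over a plain list of per-layer predictions (illustrative only):
def _pabee_exit_layer(per_layer_predictions, patience):
    patient_counter = 0
    previous = None
    for layer_idx, prediction in enumerate(per_layer_predictions):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return layer_idx  # exit here; deeper layers are skipped
    return len(per_layer_predictions) - 1  # no early exit: every layer runs


assert _pabee_exit_layer([2, 0, 0, 0, 1], patience=2) == 3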
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
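# --- usage sketch ----------------------------------------------------------------
# A hedged sketch of driving the PABEE classifier at inference time, written with
# the un-obfuscated names from the original bert-loses-patience example
# (`BertForSequenceClassificationWithPabee`, `set_patience`, `log_stats`); the
# checkpoint path is a placeholder.
def _example_pabee_inference():
    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForSequenceClassificationWithPabee.from_pretrained(  # noqa: F821 - assumed name
        "/path/to/finetuned-checkpoint"
    )
    model.bert.set_patience(3)  # stop once 3 consecutive classifiers agree
    model.bert.reset_stats()
    inputs = tokenizer("a quick sanity-check sentence", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs)[0]
    print(logits.argmax(dim=-1))
    model.bert.log_stats()  # reports the average number of layers executed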
| 679 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 720 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
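# --- sketch: the deferred-import idea behind `_LazyModule` ----------------------
# Importing this package stays cheap because the torch-backed classes above are
# only imported on first attribute access. A minimal, dependency-free sketch of
# that idea (illustrative only, not the real `_LazyModule` implementation):
def _deferred_import_sketch(attr_to_module):
    import importlib

    cache = {}

    def resolve(attr):
        # Import the owning module lazily on first access, then memoize.
        if attr not in cache:
            cache[attr] = getattr(importlib.import_module(attr_to_module[attr]), attr)
        return cache[attr]

    return resolve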
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
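# --- usage sketch ----------------------------------------------------------------
# The class above mirrors `RetriBertConfig`. A minimal sketch of constructing it
# with one overridden field (assumes a `transformers` version that still ships
# RetriBERT):
def _example_retribert_config():
    from transformers import RetriBertConfig

    config = RetriBertConfig(projection_dim=256)  # default is 128
    print(config.model_type, config.projection_dim)  # "retribert" 256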
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __UpperCAmelCase ( _UpperCAmelCase : NDArray[floataa] , _UpperCAmelCase : NDArray[floataa] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int , ) -> list[float]:
__snake_case , __snake_case = coefficient_matrix.shape
__snake_case , __snake_case = constant_matrix.shape
if rowsa != colsa:
__snake_case = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(_UpperCAmelCase )
if colsa != 1:
__snake_case = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(_UpperCAmelCase )
if rowsa != rowsa:
__snake_case = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(_UpperCAmelCase )
if len(_UpperCAmelCase ) != rowsa:
__snake_case = (
"Number of initial values must be equal to number of rows in coefficient "
F'''matrix but received {len(_UpperCAmelCase )} and {rowsa}'''
)
raise ValueError(_UpperCAmelCase )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__snake_case = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__snake_case , __snake_case = table.shape
strictly_diagonally_dominant(_UpperCAmelCase )
# Iterates the whole matrix for given number of times
for _ in range(_UpperCAmelCase ):
__snake_case = []
for row in range(_UpperCAmelCase ):
__snake_case = 0
for col in range(_UpperCAmelCase ):
if col == row:
__snake_case = table[row][col]
elif col == cols - 1:
__snake_case = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__snake_case = (temp + val) / denom
new_val.append(_UpperCAmelCase )
__snake_case = new_val
return [float(_UpperCAmelCase ) for i in new_val]
def __UpperCAmelCase ( _UpperCAmelCase : NDArray[floataa] ) -> bool:
__snake_case , __snake_case = table.shape
__snake_case = True
for i in range(0 , _UpperCAmelCase ):
__snake_case = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
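# --- worked example ----------------------------------------------------------------
# A compact, self-contained restatement of the Jacobi sweep above (the original
# was machine-renamed), run on a small strictly diagonally dominant system whose
# exact solution is (1, 2, 3):
import numpy as np  # repeated from above so this block stands alone


def _jacobi(a: np.ndarray, b: np.ndarray, x0: list[float], iterations: int) -> list[float]:
    x = np.asarray(x0, dtype=np.float64)
    d = np.diag(a)
    r = a - np.diag(d)  # off-diagonal part
    for _ in range(iterations):
        x = (b - r @ x) / d  # simultaneous update: the defining feature of Jacobi
    return [float(v) for v in x]


_solution = _jacobi(
    np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]]),
    np.array([9.0, 17.0, 17.0]),
    [0.0, 0.0, 0.0],
    iterations=100,
)
assert max(abs(v - t) for v, t in zip(_solution, (1.0, 2.0, 3.0))) < 1e-6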
| 680 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> int:
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
__snake_case = grid[0]
for row_n in range(1 , len(_UpperCAmelCase ) ):
__snake_case = grid[row_n]
__snake_case = fill_row(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = grid[row_n]
return grid[-1][-1]
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(_UpperCAmelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
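# --- worked example -------------------------------------------------------------
# The routines above were machine-renamed; a self-contained restatement of the
# same row-by-row DP, checked on a classic 3x3 instance whose cheapest
# top-left-to-bottom-right path (1 -> 3 -> 1 -> 1 -> 1) costs 7:
def _min_path_sum(grid: list[list[int]]) -> int:
    for c in range(1, len(grid[0])):
        grid[0][c] += grid[0][c - 1]  # first row: reachable only from the left
    for r in range(1, len(grid)):
        grid[r][0] += grid[r - 1][0]  # first column: reachable only from above
        for c in range(1, len(grid[0])):
            grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])
    return grid[-1][-1]


assert _min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7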
| 680 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Optional[int] = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """gpt_bigcode"""
__SCREAMING_SNAKE_CASE = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] , a_ : Optional[int]=50_257 , a_ : List[str]=1_024 , a_ : List[Any]=768 , a_ : Tuple=12 , a_ : Any=12 , a_ : Any=None , a_ : Any="gelu_pytorch_tanh" , a_ : List[Any]=0.1 , a_ : List[Any]=0.1 , a_ : int=0.1 , a_ : Any=1e-5 , a_ : List[str]=0.02 , a_ : str=True , a_ : Optional[int]=True , a_ : Tuple=50_256 , a_ : List[str]=50_256 , a_ : Union[str, Any]=True , a_ : int=True , a_ : Dict=True , **a_ : List[Any] , ):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = n_positions
__snake_case = n_embd
__snake_case = n_layer
__snake_case = n_head
__snake_case = n_inner
__snake_case = activation_function
__snake_case = resid_pdrop
__snake_case = embd_pdrop
__snake_case = attn_pdrop
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = scale_attn_weights
__snake_case = use_cache
__snake_case = attention_softmax_in_fpaa
__snake_case = scale_attention_softmax_in_fpaa
__snake_case = multi_query
__snake_case = bos_token_id
__snake_case = eos_token_id
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
| 680 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# below check should work but currently it isn't working (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
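# --- usage sketch ------------------------------------------------------------------
# A hedged sketch of where these helpers sit in an Accelerate training loop,
# written with the un-obfuscated upstream names (`save_fsdp_model`,
# `save_fsdp_optimizer`, `load_fsdp_model`, `load_fsdp_optimizer`); the checkpoint
# directory is a placeholder. Each call runs on every process.
def _example_fsdp_checkpoint_roundtrip(accelerator, model, optimizer):
    fsdp_plugin = accelerator.state.fsdp_plugin
    save_fsdp_model(fsdp_plugin, accelerator, model, "/tmp/fsdp-ckpt")  # noqa: F821 - assumed name
    save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "/tmp/fsdp-ckpt")  # noqa: F821
    # ... later, to resume training:
    load_fsdp_model(fsdp_plugin, accelerator, model, "/tmp/fsdp-ckpt")  # noqa: F821
    load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "/tmp/fsdp-ckpt")  # noqa: F821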
| 680 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__SCREAMING_SNAKE_CASE = False
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = 32
__snake_case = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a_ , projection_dim=a_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__snake_case = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a_ , num_layers=1 , )
torch.manual_seed(0 )
__snake_case = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=a_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__snake_case = StableUnCLIPImageNormalizer(embedding_dim=a_ )
__snake_case = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a_ , layers_per_block=1 , upcast_attention=a_ , use_linear_projection=a_ , )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=a_ , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL()
__snake_case = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def A ( self : Tuple , a_ : Dict , a_ : str=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=a_ )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__snake_case = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = torch.Generator(device="cpu" ).manual_seed(0 )
__snake_case = pipe("anime turle" , generator=a_ , output_type="np" )
__snake_case = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a_ , a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 680 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
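# --- quick example ---------------------------------------------------------------
# Note the slightly unusual contract above: `iterations` acts as an inclusive end
# value when counting from `number`, and the result carries a trailing space. A
# self-contained restatement with a check on the first fifteen terms (the
# original helper was machine-renamed):
def _fizz_buzz(number: int, iterations: int) -> str:
    out = []
    while number <= iterations:
        term = ("Fizz" * (number % 3 == 0)) + ("Buzz" * (number % 5 == 0))
        out.append(term or str(number))
        number += 1
    return " ".join(out) + " "


assert _fizz_buzz(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "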
| 680 | 1 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = JukeboxTokenizer
__SCREAMING_SNAKE_CASE = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def A ( self : List[Any] ):
"""simple docstring"""
import torch
__snake_case = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def A ( self : Any ):
"""simple docstring"""
import torch
__snake_case = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 680 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
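# --- worked examples ---------------------------------------------------------------
# A self-contained restatement of the routine above (machine-renamed in the
# original), plus checks: -5 has magnitude 0b101, so with a sign bit it becomes
# the 4-bit pattern 0b1011:
def _twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    width = len(bin(number)[3:])  # bit length of |number| ('-0b' is skipped)
    body = bin(abs(number) - (1 << width))[3:]
    return "0b" + ("1" + "0" * (width - len(body)) + body if number < 0 else "0")


assert _twos_complement(0) == "0b0"
assert _twos_complement(-1) == "0b11"
assert _twos_complement(-5) == "0b1011"
assert _twos_complement(-17) == "0b101111"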
| 680 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : Tuple = logging.get_logger(__name__)
a : Union[str, Any] = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """deta"""
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : str , a_ : str=None , a_ : List[Any]=900 , a_ : Optional[Any]=2_048 , a_ : Optional[int]=6 , a_ : Dict=2_048 , a_ : Optional[Any]=8 , a_ : List[str]=6 , a_ : Optional[Any]=1_024 , a_ : List[Any]=8 , a_ : List[Any]=0.0 , a_ : int=True , a_ : Union[str, Any]="relu" , a_ : Optional[int]=256 , a_ : Union[str, Any]=0.1 , a_ : Tuple=0.0 , a_ : Optional[Any]=0.0 , a_ : Optional[int]=0.02 , a_ : str=1.0 , a_ : Dict=True , a_ : Dict=False , a_ : int="sine" , a_ : str=5 , a_ : Any=4 , a_ : Union[str, Any]=4 , a_ : Tuple=True , a_ : str=300 , a_ : List[Any]=True , a_ : Dict=True , a_ : Dict=1 , a_ : Optional[Any]=5 , a_ : Union[str, Any]=2 , a_ : Union[str, Any]=1 , a_ : Dict=1 , a_ : str=5 , a_ : List[Any]=2 , a_ : Dict=0.1 , a_ : Any=0.25 , **a_ : int , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__snake_case = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(a_ , a_ ):
__snake_case = backbone_config.pop("model_type" )
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(a_ )
__snake_case = backbone_config
__snake_case = num_queries
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
# deformable attributes
__snake_case = num_feature_levels
__snake_case = encoder_n_points
__snake_case = decoder_n_points
__snake_case = two_stage
__snake_case = two_stage_num_proposals
__snake_case = with_box_refine
__snake_case = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
__snake_case = focal_alpha
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def A ( self : Tuple ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A ( self : str ):
"""simple docstring"""
return self.d_model
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.backbone_config.to_dict()
__snake_case = self.__class__.model_type
return output
| 680 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
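# --- quick check --------------------------------------------------------------------
# Both counters above implement population count; e.g. 37 = 0b100101 has three set
# bits. A self-contained restatement of the Brian Kernighan loop, checked against
# Python's built-in reference (the helpers above were machine-renamed):
def _popcount_kernighan(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    count = 0
    while number:
        number &= number - 1  # clears the lowest set bit each pass
        count += 1
    return count


for _value in (0, 1, 25, 37, 58, 255):
    assert _popcount_kernighan(_value) == bin(_value).count("1")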
| 680 | 1 |
'''simple docstring'''
import pprint
import requests
a : int = '''https://zenquotes.io/api'''
def __UpperCAmelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def __UpperCAmelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
a : str = random_quotes()
pprint.pprint(response)
| 680 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : list[int] , _UpperCAmelCase : str ) -> list[int]:
__snake_case = int(_UpperCAmelCase )
# Initialize Result
__snake_case = []
# Traverse through all denomination
for denomination in reversed(_UpperCAmelCase ):
# Find denominations
while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ):
total_value -= int(_UpperCAmelCase )
answer.append(_UpperCAmelCase ) # Append the "answers" array
return answer

# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 680 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We evaluate on the dev set in place of the test set
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
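
# Example invocation (a sketch only: paths, model and task are placeholders, and
# flag names other than --task/--max_seq_length/--gpus are assumed to come from
# add_generic_args / BaseTransformer.add_model_specific_args in lightning_base):
#     python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#         --model_name_or_path bert-base-cased --output_dir ./results --do_train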
| 680 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
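
# Example (hypothetical CSV path; the subcommand is registered on the
# transformers-cli entry point via register_subcommand above):
#     transformers-cli train --train_data ./train.csv --task text_classification --output ./my_model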
| 680 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size

    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 680 | 1 |
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
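
# Worked values (edge = 5), computed from the closed forms above:
#     dodecahedron_surface_area(5) = 3 * sqrt(25 + 10*sqrt(5)) * 25 ≈ 516.14
#     dodecahedron_volume(5)       = ((15 + 7*sqrt(5)) / 4) * 125   ≈ 957.89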
| 680 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that, for each column vector, the kept probabilities sum to at most
        `truncation_rate`; the lowest probabilities beyond that threshold are zeroed out (in log space).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort so the mask lines up with the original vocabulary order
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
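
# A minimal usage sketch ("microsoft/vq-diffusion-ithq" is assumed to be the
# reference VQ-Diffusion checkpoint on the Hub):
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]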
| 680 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
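
# A minimal usage sketch (coordinates are illustrative; the relative import
# above means this module must be run as part of its package):
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # distance in metres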
| 680 | 1 |
'''simple docstring'''
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 680 |
'''simple docstring'''
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 680 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
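
# A minimal sanity sketch: the attribute_map above lets the standard config
# attribute names resolve to the XGLM-specific ones.
#     config = XGLMConfig()
#     assert config.hidden_size == config.d_model
#     assert config.num_hidden_layers == config.num_layers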
| 680 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    # Placeholder for doctest-based checks.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
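
# A small illustrative run (menu numbers are made up for this sketch):
#     food = ["Burger", "Pizza", "Coca Cola", "Rice"]
#     value = [80, 100, 60, 70]
#     weight = [40, 10, 20, 70]
#     foods = build_menu(food, value, weight)
#     chosen, total_value = greedy(foods, 60, Things.get_value)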
| 680 | 1 |