"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""yjernite/retribert-base-uncased""": 512,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ =RetriBertTokenizer
lowerCamelCase__ =['input_ids', 'attention_mask']
def __init__(self , a_=None , a_=None , a_=True , a_="[UNK]" , a_="[SEP]" , a_="[PAD]" , a_="[CLS]" , a_="[MASK]" , a_=True , a_=None , **a_ , ):
'''simple docstring'''
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , )
__snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , a_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , a_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , a_ ) != tokenize_chinese_chars
):
__snake_case : Optional[int] = getattr(a_ , normalizer_state.pop('''type''' ) )
__snake_case : Tuple = do_lower_case
__snake_case : Optional[Any] = strip_accents
__snake_case : Union[str, Any] = tokenize_chinese_chars
__snake_case : int = normalizer_class(**a_ )
__snake_case : Union[str, Any] = do_lower_case
def SCREAMING_SNAKE_CASE (self , a_ , a_=None ):
'''simple docstring'''
__snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : List[str] = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
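
# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal sketch of the tokenizer above; it assumes the public
# "yjernite/retribert-base-uncased" checkpoint referenced in the maps above is
# reachable. The comments describe the expected special-token layout.
if __name__ == "__main__":
    tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    encoded = tokenizer("how are you?", "i am fine.")
    print(encoded["input_ids"])       # [CLS] ids_a [SEP] ids_b [SEP]
    print(encoded["token_type_ids"])  # 0s over the first segment, 1s over the second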
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
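
# --- Usage sketch (editor addition) ---
# How the processor under test behaves outside the harness; the random image
# and the sizes are illustrative. Resize to shortest edge 20, center-crop to
# 18x18, so the output is (1, 3, 18, 18).
if __name__ == "__main__":
    processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    image = Image.fromarray(np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8))
    print(processor(image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])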
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ):
'''simple docstring'''
__snake_case : Any = vocab_size
__snake_case : str = max_position_embeddings
__snake_case : Any = d_model
__snake_case : List[str] = d_ff
__snake_case : Dict = d_ext
__snake_case : Optional[Any] = d_spout
__snake_case : int = num_switch_layers
__snake_case : List[Any] = num_ext_layers
__snake_case : Any = num_switch_layers + num_ext_layers
__snake_case : Optional[int] = num_heads
__snake_case : Tuple = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : Dict = dropout_rate
__snake_case : Optional[Any] = layer_norm_epsilon
__snake_case : Dict = router_bias
__snake_case : str = router_jitter_noise
__snake_case : List[str] = router_dtype
__snake_case : Union[str, Any] = router_ignore_padding_tokens
__snake_case : List[str] = output_hidden_states
__snake_case : Optional[Any] = output_attentions
__snake_case : Any = initializer_factor
__snake_case : int = output_router_logits
__snake_case : Union[str, Any] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
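
# --- Usage sketch (editor addition) ---
# Building a default config; the asserted values restate defaults from the
# signature above (num_layers is the sum of switch and extra layers).
if __name__ == "__main__":
    config = GPTSanJapaneseConfig()
    assert config.d_model == 1024 and config.num_layers == 10
    print(config.to_json_string())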
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
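
# --- Usage sketch (editor addition) ---
# Minimal end-to-end use of the API exercised by the tests above, on CPU and in
# a single process; `create_components` is the helper defined at the top.
if __name__ == "__main__":
    accelerator = Accelerator(cpu=True)
    model, optimizer, scheduler, train_dl, _ = create_components()
    model, optimizer, scheduler, train_dl = accelerator.prepare(model, optimizer, scheduler, train_dl)
    (batch,) = next(iter(train_dl))                 # one toy batch of shape (1,)
    loss = model(batch.float().expand(1, 2)).sum()  # Linear(2, 4) expects 2 features
    accelerator.backward(loss)                      # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()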
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int ) ->str:
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
__snake_case : Any = len(bin(_snake_case )[3:] )
__snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
__snake_case : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
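
    # Worked examples (editor addition; values checked by hand against the
    # definition above):
    assert twos_complement(0) == "0b0"
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(-17) == "0b101111"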
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , a_ , )
super().__init__(*a_ , **a_ )
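
# --- Usage sketch (editor addition) ---
# The shim behaves like LayoutLMv2ImageProcessor but emits a FutureWarning on
# construction (assumes the vision extras are installed).
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LayoutLMv2FeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)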
"""simple docstring"""
def lowercase ( ) ->int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
for b in range(_snake_case , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
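
# Hand check (editor addition): the triplet is (200, 375, 425), since
# 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2
# (40000 + 140625 == 180625), so solution() == 200 * 375 * 425 == 31875000.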
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__snake_case : str = size if size is not None else {'''height''': 18, '''width''': 18}
__snake_case : str = parent
__snake_case : Optional[Any] = batch_size
__snake_case : int = num_channels
__snake_case : List[Any] = image_size
__snake_case : Optional[int] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[str] = do_resize
__snake_case : str = size
__snake_case : Union[str, Any] = do_normalize
__snake_case : Optional[Any] = image_mean
__snake_case : str = image_std
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =DPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = DPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''image_mean''' ) )
self.assertTrue(hasattr(a_ , '''image_std''' ) )
self.assertTrue(hasattr(a_ , '''do_normalize''' ) )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : List[Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : int = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : int = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
"""simple docstring"""
def lowercase ( _snake_case : int = 100 ) ->int:
"""simple docstring"""
__snake_case : str = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
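
# Hand check (editor addition): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 == 3025, so solution(10) == 2640; the default
# solution(100) evaluates to 25164150.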
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='vit_msn'
def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-06 , a_=2_24 , a_=16 , a_=3 , a_=True , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : List[str] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Tuple = intermediate_size
__snake_case : int = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Union[str, Any] = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : Tuple = image_size
__snake_case : Optional[Any] = patch_size
__snake_case : Tuple = num_channels
__snake_case : Optional[Any] = qkv_bias
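
# --- Usage sketch (editor addition) ---
# Defaults mirror a ViT-base architecture; the assertions restate values from
# the signature above.
if __name__ == "__main__":
    config = ViTMSNConfig()
    assert config.hidden_size == 768 and config.patch_size == 16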
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
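
# --- Usage sketch (editor addition) ---
# This builder is what backs `load_dataset("parquet", ...)`; the file path is
# illustrative.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
    print(ds["train"].features)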
"""simple docstring"""
import datasets
SCREAMING_SNAKE_CASE : Optional[Any] = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
SCREAMING_SNAKE_CASE : List[str] = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
SCREAMING_SNAKE_CASE : str = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def lowercase ( _snake_case : str , _snake_case : str ) ->Any:
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
return {"accuracy": simple_accuracy(a_ , a_ )}
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__snake_case : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = '''sshleifer/tiny-gpt2'''
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''sgugger/tiny-distilbert-classification'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , )
__snake_case : Optional[Any] = TensorFlowBenchmark(a_ )
__snake_case : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Any = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ )
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Dict = TensorFlowBenchmark(a_ , [config] )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : int = TensorFlowBenchmark(a_ )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
__snake_case : Tuple = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] )
__snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(a_ )
benchmark.run()
self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(a_ ):
self.assertTrue(hasattr(a_ , '''sequential''' ) )
self.assertTrue(hasattr(a_ , '''cumulative''' ) )
self.assertTrue(hasattr(a_ , '''current''' ) )
self.assertTrue(hasattr(a_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ )
__snake_case : Optional[int] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
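
# --- Standalone usage sketch (editor addition, mirrors the tests above) ---
# "sshleifer/tiny-gpt2" is the same tiny public checkpoint used by the tests.
if __name__ == "__main__" and is_tf_available():
    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(TensorFlowBenchmark(args).run())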
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def lowercase ( ) ->str:
"""simple docstring"""
global _logger
__snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = lock_file
return None
def __str__(self ):
'''simple docstring'''
__snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.lock.release()
return None
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ )
# The path to the lock file.
__snake_case : str = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__snake_case : Dict = None
# The default timeout value.
__snake_case : List[Any] = timeout
# We use this lock primarily for the lock counter.
__snake_case : Tuple = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__snake_case : Optional[Any] = 0
return None
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = float(a_ )
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ):
'''simple docstring'''
if timeout is None:
__snake_case : List[str] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__snake_case : Optional[int] = id(self )
__snake_case : str = self._lock_file
__snake_case : Optional[int] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(a_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__snake_case : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__snake_case : Tuple = id(self )
__snake_case : str = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__snake_case : Dict = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
def __del__(self ):
'''simple docstring'''
self.release(force=a_ )
return None
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = os.path.basename(a_ )
if len(a_ ) > max_length and max_length > 0:
__snake_case : List[Any] = os.path.dirname(a_ )
__snake_case : Any = str(hash(a_ ) )
__snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(a_ , a_ )
else:
return path
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
__snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__snake_case : Any = os.open(self._lock_file , a_ )
except OSError:
pass
else:
try:
msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(a_ )
else:
__snake_case : Dict = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Dict = None
msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 )
os.close(a_ )
try:
os.remove(self._lock_file )
        # Another instance of the application has probably
        # re-acquired the lock file in the meantime.
except OSError:
pass
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__snake_case : List[str] = os.open(self._lock_file , a_ )
try:
fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(a_ )
else:
__snake_case : Optional[int] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Tuple = None
fcntl.flock(a_ , fcntl.LOCK_UN )
os.close(a_ )
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__snake_case : Tuple = os.open(self._lock_file , a_ )
except OSError:
pass
else:
__snake_case : List[Any] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
os.close(self._lock_file_fd )
__snake_case : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
SCREAMING_SNAKE_CASE : Dict = None
if msvcrt:
SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock
elif fcntl:
SCREAMING_SNAKE_CASE : List[str] = UnixFileLock
else:
SCREAMING_SNAKE_CASE : str = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 24 | 1 |
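The row above implements a re-entrant file lock with a polling acquire and a timeout; the concrete backends are Windows msvcrt, Unix fcntl, and a soft O_EXCL fallback. Since the identifiers in the row are obfuscated, here is a minimal standalone sketch of the soft-lock strategy under assumed names; it is an illustration, not the library's actual API.

import os
import time

class SoftLock:
    """Minimal sketch: creating the lock file with O_EXCL either succeeds
    atomically or raises OSError because the file already exists."""

    def __init__(self, lock_file: str, timeout: float = -1) -> None:
        self._lock_file = lock_file
        self._fd = None
        self.timeout = timeout

    def __enter__(self) -> "SoftLock":
        start = time.time()
        while True:
            try:
                self._fd = os.open(self._lock_file, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
                return self
            except OSError:
                # A negative timeout means "wait forever", as in the row above.
                if 0 <= self.timeout < time.time() - start:
                    raise TimeoutError(self._lock_file)
                time.sleep(0.05)

    def __exit__(self, *exc) -> None:
        os.close(self._fd)
        self._fd = None
        try:
            os.remove(self._lock_file)
        except OSError:
            pass  # another process may have grabbed the file already

# usage: with SoftLock("data.lock", timeout=5.0): ...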
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='beit'
def __init__(self , a_=81_92 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-12 , a_=2_24 , a_=16 , a_=3 , a_=False , a_=False , a_=False , a_=False , a_=0.1 , a_=0.1 , a_=True , a_=[3, 5, 7, 11] , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=2_56 , a_=1 , a_=False , a_=2_55 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : List[Any] = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Any = initializer_range
__snake_case : Optional[int] = layer_norm_eps
__snake_case : List[Any] = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : Tuple = use_mask_token
__snake_case : int = use_absolute_position_embeddings
__snake_case : Any = use_relative_position_bias
__snake_case : Any = use_shared_relative_position_bias
__snake_case : Tuple = layer_scale_init_value
__snake_case : Dict = drop_path_rate
__snake_case : Any = use_mean_pooling
# decode head attributes (semantic segmentation)
__snake_case : Optional[int] = out_indices
__snake_case : Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
__snake_case : str = use_auxiliary_head
__snake_case : List[Any] = auxiliary_loss_weight
__snake_case : Tuple = auxiliary_channels
__snake_case : int = auxiliary_num_convs
__snake_case : Dict = auxiliary_concat_input
__snake_case : List[Any] = semantic_loss_ignore_index
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-4
| 24 |
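A short usage sketch for the configuration row above. It assumes the standard transformers BeitConfig entry point; the attribute names match the __init__ in the row.

from transformers import BeitConfig

config = BeitConfig(image_size=224, patch_size=16, use_relative_position_bias=True)
print(config.model_type)    # "beit"
print(config.num_channels)  # 3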
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Dict = seq_length
__snake_case : List[str] = is_training
__snake_case : List[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = scope
__snake_case : Any = range_bbox
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[str] = bbox[i, j, 3]
__snake_case : Any = bbox[i, j, 1]
__snake_case : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : List[str] = bbox[i, j, 2]
__snake_case : Union[str, Any] = bbox[i, j, 0]
__snake_case : Dict = t
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[str] = None
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ )
__snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ )
__snake_case : List[str] = model(a_ , bbox=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : List[str] = LiltForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Dict = config_and_inputs
__snake_case : Any = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModelTester(self )
__snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Dict = type
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = LiltModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ )
__snake_case : Dict = torch.tensor([[1, 2]] , device=a_ )
__snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ )
__snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] )
__snake_case : str = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , )
        self.assertEqual(outputs.last_hidden_state.shape , a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
| 24 | 1 |
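The tester in the row above repairs randomly generated bounding boxes element by element so that x0 <= x1 and y0 <= y1. Below is a vectorized sketch of the same invariant; make_bbox_legal is an assumed helper name, not part of the row.

import torch

def make_bbox_legal(bbox: torch.Tensor) -> torch.Tensor:
    # Sort each (x0, x1) and (y0, y1) pair so coordinates are non-decreasing,
    # which is exactly what the nested loops in the tester enforce.
    x = torch.sort(bbox[..., [0, 2]], dim=-1).values
    y = torch.sort(bbox[..., [1, 3]], dim=-1).values
    return torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)

bbox = torch.randint(0, 1000, (2, 5, 4))
legal = make_bbox_legal(bbox)
assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()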
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase ( _snake_case : List[str] , _snake_case : Any=False ) ->Optional[int]:
"""simple docstring"""
try:
__snake_case : List[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Tuple = strtobool(_snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
SCREAMING_SNAKE_CASE : str = parse_flag_from_env("""RUN_SLOW""", default=False)
def lowercase ( _snake_case : Optional[Any] ) ->List[str]:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(_snake_case )
def lowercase ( _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(_snake_case )
def lowercase ( _snake_case : Optional[int] ) ->List[str]:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(_snake_case )
def lowercase ( _snake_case : Tuple ) ->Any:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(_snake_case )
def lowercase ( _snake_case : List[Any] ) ->int:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(_snake_case )
def lowercase ( _snake_case : Union[str, Any] ) ->Dict:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(_snake_case )
def lowercase ( _snake_case : Optional[Any] ) ->List[str]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(_snake_case )
def lowercase ( _snake_case : Dict ) ->Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(_snake_case )
def lowercase ( _snake_case : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(_snake_case )
def lowercase ( _snake_case : List[Any] ) ->Dict:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(_snake_case )
def lowercase ( _snake_case : str ) ->Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(_snake_case )
def lowercase ( _snake_case : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(_snake_case )
def lowercase ( _snake_case : Any ) ->Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(_snake_case )
def lowercase ( _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(_snake_case )
def lowercase ( _snake_case : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(_snake_case )
def lowercase ( _snake_case : Union[str, Any] ) ->Any:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(_snake_case )
def lowercase ( _snake_case : int=None , _snake_case : List[Any]=None ) ->str:
"""simple docstring"""
if test_case is None:
return partial(_snake_case , version=_snake_case )
return unittest.skipUnless(is_torch_version('''>=''' , _snake_case ) , f"""test requires torch version >= {version}""" )(_snake_case )
def lowercase ( _snake_case : Tuple ) ->List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(_snake_case )
def lowercase ( _snake_case : Union[str, Any] ) ->Any:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(_snake_case )
def lowercase ( _snake_case : Tuple ) ->int:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(_snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase ( _snake_case : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(_snake_case )
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =True
@classmethod
def SCREAMING_SNAKE_CASE (cls ):
'''simple docstring'''
__snake_case : Dict = tempfile.mkdtemp()
@classmethod
def SCREAMING_SNAKE_CASE (cls ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(a_ )
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = mocks if isinstance(a_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase ( _snake_case : Tuple ) ->List[str]:
"""simple docstring"""
__snake_case : Dict = AcceleratorState()
__snake_case : Union[str, Any] = tensor[None].clone().to(state.device )
__snake_case : Dict = gather(_snake_case ).cpu()
__snake_case : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _snake_case ):
return False
return True
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : int = returncode
__snake_case : str = stdout
__snake_case : List[Any] = stderr
async def lowercase ( _snake_case : str , _snake_case : List[str] ) ->Optional[Any]:
"""simple docstring"""
while True:
__snake_case : str = await stream.readline()
if line:
callback(_snake_case )
else:
break
async def lowercase ( _snake_case : Tuple , _snake_case : int=None , _snake_case : Any=None , _snake_case : List[Any]=None , _snake_case : List[str]=False , _snake_case : str=False ) ->_RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(_snake_case ) )
__snake_case : Union[str, Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Optional[int] = []
__snake_case : Tuple = []
def tee(_snake_case : Tuple , _snake_case : str , _snake_case : int , _snake_case : Any="" ):
__snake_case : Tuple = line.decode('''utf-8''' ).rstrip()
sink.append(_snake_case )
if not quiet:
print(_snake_case , _snake_case , file=_snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _snake_case : tee(_snake_case , _snake_case , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _snake_case : tee(_snake_case , _snake_case , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=_snake_case , )
return _RunOutput(await p.wait() , _snake_case , _snake_case )
def lowercase ( _snake_case : str , _snake_case : Union[str, Any]=None , _snake_case : int=None , _snake_case : List[str]=180 , _snake_case : Optional[int]=False , _snake_case : Optional[Any]=True ) ->_RunOutput:
"""simple docstring"""
__snake_case : List[str] = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(_snake_case , env=_snake_case , stdin=_snake_case , timeout=_snake_case , quiet=_snake_case , echo=_snake_case ) )
__snake_case : Tuple = ''' '''.join(_snake_case )
if result.returncode > 0:
__snake_case : Tuple = '''\n'''.join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
return result
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
pass
def lowercase ( _snake_case : List[str] , _snake_case : Union[str, Any]=False ) ->Tuple:
"""simple docstring"""
try:
__snake_case : Optional[int] = subprocess.check_output(_snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_snake_case , '''decode''' ):
__snake_case : str = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"""Command `{' '.join(_snake_case )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 24 |
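A de-obfuscated sketch of the environment-flag pattern that opens the row above: a RUN_SLOW variable parsed with strtobool gates a slow decorator. The names mirror the row; only the simplification is mine.

import os
import unittest
from distutils.util import strtobool

def parse_flag_from_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    if value is None:
        return default  # variable unset: fall back to the default
    return bool(strtobool(value))  # accepts yes/no, true/false, 1/0

RUN_SLOW = parse_flag_from_env("RUN_SLOW", default=False)

def slow(test_case):
    # Skip the decorated test unless RUN_SLOW=1 (or yes/true) is exported.
    return unittest.skipUnless(RUN_SLOW, "test is slow")(test_case)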
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(a_ , attention_mask=a_ )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
| 24 | 1 |
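The GPU test near the end of the row above traces each model with torch.jit.trace, saves the TorchScript archive, reloads it, and calls it. A minimal self-contained sketch of that round-trip with a toy module (the shapes are assumptions):

import os
import tempfile
import torch

class Tiny(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        return input_ids * attention_mask

model = Tiny().eval()
example = (torch.randn(1, 4), torch.ones(1, 4))
traced = torch.jit.trace(model, example)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
    assert torch.allclose(loaded(*example), model(*example))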
"""simple docstring"""
def lowercase ( _snake_case : list ) ->list:
"""simple docstring"""
if len(_snake_case ) <= 1:
return lst
__snake_case : List[str] = 1
while i < len(_snake_case ):
if lst[i - 1] <= lst[i]:
i += 1
else:
__snake_case , __snake_case : Any = lst[i], lst[i - 1]
i -= 1
if i == 0:
__snake_case : Any = 1
return lst
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 24 |
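A readable restatement of the gnome sort in the row above, with a quick self-check; the logic is the same in-place walk that steps back after each swap.

def gnome_sort(lst: list) -> list:
    i = 1
    while i < len(lst):
        if i == 0 or lst[i - 1] <= lst[i]:
            i += 1  # pair in order: move forward
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1  # pair swapped: step back and re-check
    return lst

assert gnome_sort([34, 2, 10, -9]) == [-9, 2, 10, 34]
assert gnome_sort([]) == []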
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]:
"""simple docstring"""
def get_masked_lm_array(_snake_case : str ):
__snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : str = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Any = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_array(_snake_case : str ):
__snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_layer_array(_snake_case : int , _snake_case : str ):
__snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ):
__snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case )
__snake_case : int = array.reshape(_snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
print(f"""Loading model based on config from {config_path}...""" )
__snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case )
__snake_case : Dict = BertForMaskedLM(_snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
    __snake_case : torch.Tensor = get_encoder_array('''_pooler_layer/kernel''' )
    __snake_case : torch.Tensor = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 1 |
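Every loader in the conversion script above transposes any array whose name contains "kernel". The reason, sketched below, is a layout convention: TF dense kernels are stored (in_features, out_features) while torch.nn.Linear.weight is (out_features, in_features).

import numpy as np
import torch

in_features, out_features = 768, 3072
tf_kernel = np.random.randn(in_features, out_features).astype(np.float32)

linear = torch.nn.Linear(in_features, out_features)
# Transpose the TF kernel into PyTorch's (out, in) layout before copying.
linear.weight.data = torch.from_numpy(tf_kernel.transpose())
assert linear.weight.shape == (out_features, in_features)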
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for Manhattan distance, 0 for Euclidean distance
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : List[str] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
SCREAMING_SNAKE_CASE : List[Any] = tuple[int, int]
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Tuple = pos_x
__snake_case : List[Any] = pos_y
__snake_case : Dict = (pos_y, pos_x)
__snake_case : List[Any] = goal_x
__snake_case : Tuple = goal_y
__snake_case : str = g_cost
__snake_case : Optional[int] = parent
__snake_case : Optional[Any] = self.calculate_heuristic()
__snake_case : List[Any] = self.g_cost + self.h_cost
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.pos_x - self.goal_x
__snake_case : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(a_ ) + abs(a_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self , a_ ):
'''simple docstring'''
return self.f_cost < other.f_cost
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a_ )
__snake_case : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , a_ )
__snake_case : Optional[Any] = [self.start]
__snake_case : list[Node] = []
__snake_case : List[str] = False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__snake_case : List[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(a_ )
self.closed_nodes.append(a_ )
__snake_case : Any = self.get_successors(a_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(a_ )
else:
# retrieve the best current path
__snake_case : Optional[int] = self.open_nodes.pop(self.open_nodes.index(a_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(a_ )
else:
self.open_nodes.append(a_ )
return [self.start.pos]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : str = []
for action in delta:
__snake_case : Optional[Any] = parent.pos_x + action[1]
__snake_case : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
a_ , a_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a_ , ) )
return successors
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = node
__snake_case : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__snake_case : int = current_node.parent
path.reverse()
return path
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = AStar(a_ , a_ )
__snake_case : Dict = AStar(a_ , a_ )
__snake_case : int = False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__snake_case : List[str] = self.fwd_astar.open_nodes.pop(0 )
__snake_case : Tuple = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
a_ , a_ )
self.fwd_astar.closed_nodes.append(a_ )
self.bwd_astar.closed_nodes.append(a_ )
__snake_case : Any = current_bwd_node
__snake_case : int = current_fwd_node
__snake_case : str = {
self.fwd_astar: self.fwd_astar.get_successors(a_ ),
self.bwd_astar: self.bwd_astar.get_successors(a_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(a_ )
else:
# retrieve the best current path
__snake_case : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(a_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(a_ )
else:
astar.open_nodes.append(a_ )
return [self.fwd_astar.start.pos]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : int = self.fwd_astar.retrace_path(a_ )
__snake_case : Any = self.bwd_astar.retrace_path(a_ )
bwd_path.pop()
bwd_path.reverse()
__snake_case : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE : Tuple = (0, 0)
SCREAMING_SNAKE_CASE : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
SCREAMING_SNAKE_CASE : List[str] = time.time()
SCREAMING_SNAKE_CASE : str = AStar(init, goal)
SCREAMING_SNAKE_CASE : Tuple = a_star.search()
SCREAMING_SNAKE_CASE : int = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
SCREAMING_SNAKE_CASE : Dict = time.time()
    SCREAMING_SNAKE_CASE : List[str] = BidirectionalAStar(init, goal)
    SCREAMING_SNAKE_CASE : Any = bd_astar.search()  # run the search before stopping the timer
SCREAMING_SNAKE_CASE : Tuple = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 24 |
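The HEURISTIC flag at the top of the row above switches Node.calculate_heuristic between Manhattan and Euclidean distance. A worked example for a node at (y, x) = (2, 1) with goal (6, 5):

from math import sqrt

dx, dy = 1 - 5, 2 - 6            # pos - goal, as in calculate_heuristic
manhattan = abs(dx) + abs(dy)    # 8      (HEURISTIC == 1)
euclidean = sqrt(dx**2 + dy**2)  # ~5.657 (HEURISTIC == 0)
assert manhattan == 8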
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ):
'''simple docstring'''
super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ )
__snake_case : Union[str, Any] = Sql(
cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = None
__snake_case : Dict = None
__snake_case : Dict = None
__snake_case : List[str] = None
self.builder.download_and_prepare(
download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , )
# Build dataset for splits
__snake_case : Any = self.builder.as_dataset(
split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__snake_case : List[str] = dataset
__snake_case : Tuple = name
__snake_case : Optional[int] = con
__snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case : Dict = num_proc
__snake_case : Dict = to_sql_kwargs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ )
__snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ )
__snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ )
__snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case : Optional[Any] = args
__snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__snake_case : Dict = query_table(
table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : Tuple = batch.to_pandas()
__snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ )
return num_rows or len(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 24 | 1 |
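A hedged usage sketch for the reader/writer row above, via the public datasets entry points that wrap these classes. The to_sql/from_sql signatures and the SQLite URI are assumptions based on the standard datasets API.

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})

# Write the dataset to a SQL table in batches (pandas DataFrame.to_sql under the hood).
ds.to_sql("my_table", "sqlite:///data.db", batch_size=1000)

# Read it back with an arbitrary query.
ds2 = Dataset.from_sql("SELECT text, label FROM my_table", "sqlite:///data.db")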
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowercase ( ) ->tuple[list[int], int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [randint(-1_000 , 1_000 ) for i in range(10 )]
__snake_case : int = randint(-5_000 , 5_000 )
return (arr, r)
SCREAMING_SNAKE_CASE : Any = make_dataset()
def lowercase ( _snake_case : list[int] , _snake_case : int ) ->tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(_snake_case , 3 ):
if sum(_snake_case ) == target:
return tuple(sorted(_snake_case ) )
return (0, 0, 0)
def lowercase ( _snake_case : list[int] , _snake_case : int ) ->tuple[int, int, int]:
"""simple docstring"""
arr.sort()
__snake_case : int = len(_snake_case )
for i in range(n - 1 ):
__snake_case , __snake_case : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowercase ( ) ->tuple[float, float]:
"""simple docstring"""
__snake_case : Tuple = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
__snake_case : Optional[int] = '''
triplet_sum1(*dataset)
'''
__snake_case : Optional[Any] = '''
triplet_sum2(*dataset)
'''
__snake_case : List[Any] = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=10_000 )
__snake_case : str = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=10_000 )
return (min(_snake_case ), min(_snake_case ))
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Union[str, Any] = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 24 |
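A readable restatement of the two-pointer solution in the row above, with a worked check; sorting first is what makes the left/right pointer moves sound.

def three_sum(arr: list[int], target: int) -> tuple[int, int, int]:
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            if s < target:
                left += 1   # sum too small: advance the low pointer
            else:
                right -= 1  # sum too large: pull back the high pointer
    return (0, 0, 0)

assert three_sum([1, 5, 2, 8, -3], 10) == (-3, 5, 8)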
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='lxmert'
lowerCamelCase__ ={}
def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : List[Any] = num_attention_heads
__snake_case : int = hidden_act
__snake_case : int = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : str = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = num_qa_labels
__snake_case : int = num_object_labels
__snake_case : Optional[Any] = num_attr_labels
__snake_case : Union[str, Any] = l_layers
__snake_case : Optional[int] = x_layers
__snake_case : Optional[int] = r_layers
__snake_case : Tuple = visual_feat_dim
__snake_case : Optional[int] = visual_pos_dim
__snake_case : Dict = visual_loss_normalizer
__snake_case : str = task_matched
__snake_case : Optional[Any] = task_mask_lm
__snake_case : List[str] = task_obj_predict
__snake_case : Optional[Any] = task_qa
__snake_case : Any = visual_obj_loss
__snake_case : int = visual_attr_loss
__snake_case : List[Any] = visual_feat_loss
__snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**a_ )
| 24 | 1 |
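A short usage sketch for the configuration row above, assuming the standard transformers LxmertConfig entry point; note that num_hidden_layers ends up as a dict over the three encoder stacks, as the last assignment in __init__ shows.

from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}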
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse using np.linspace()
SCREAMING_SNAKE_CASE : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
SCREAMING_SNAKE_CASE : List[Any] = [0, 25, 50]
SCREAMING_SNAKE_CASE : str = [25, 50, 75]
SCREAMING_SNAKE_CASE : str = fuzz.membership.trimf(X, abca)
SCREAMING_SNAKE_CASE : int = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
SCREAMING_SNAKE_CASE : List[str] = np.ones(75)
SCREAMING_SNAKE_CASE : List[str] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
SCREAMING_SNAKE_CASE : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
SCREAMING_SNAKE_CASE : Tuple = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    SCREAMING_SNAKE_CASE : List[Any] = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    SCREAMING_SNAKE_CASE : Optional[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    SCREAMING_SNAKE_CASE : Optional[Any] = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    SCREAMING_SNAKE_CASE : Optional[int] = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    SCREAMING_SNAKE_CASE : Optional[int] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    SCREAMING_SNAKE_CASE : Any = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 24 |
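The same eight operations can be verified without skfuzzy. A plain NumPy sketch using the formulas from the (corrected) comments above; the two membership arrays are made up for illustration.

import numpy as np

mu_a = np.array([0.0, 0.3, 0.7, 1.0])
mu_b = np.array([0.2, 0.5, 0.5, 0.1])

union        = np.maximum(mu_a, mu_b)        # max(µA, µB)
intersection = np.minimum(mu_a, mu_b)        # min(µA, µB)
complement_a = 1.0 - mu_a                    # 1 - µA
difference   = np.minimum(mu_a, 1.0 - mu_b)  # min(µA, 1 - µB)
alg_sum      = mu_a + mu_b - mu_a * mu_b     # µA + µB - µA*µB
alg_product  = mu_a * mu_b                   # µA * µB
bounded_sum  = np.minimum(1.0, mu_a + mu_b)  # min(1, µA + µB)
bounded_diff = np.maximum(0.0, mu_a - mu_b)  # max(0, µA - µB)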
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__snake_case : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__snake_case : List[str] = s - 2 * j
break
return diff
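# Hedged usage sketch (assumes the erased assignment targets above write into
# the dp table, as in the standard minimum-partition algorithm):
# lowercase([1, 6, 11, 5]) -> 1, since the best split is {1, 5, 6} vs {11}.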
| 24 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=3 , a_=2_24 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__snake_case : str = size if size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Union[str, Any] = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : Union[str, Any] = num_channels
__snake_case : List[Any] = image_size
__snake_case : int = min_resolution
__snake_case : int = max_resolution
__snake_case : str = do_resize
__snake_case : Tuple = size
__snake_case : List[str] = do_normalize
__snake_case : List[str] = image_mean
__snake_case : List[Any] = image_std
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = EfficientFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''image_mean''' ) )
self.assertTrue(hasattr(a_ , '''image_std''' ) )
self.assertTrue(hasattr(a_ , '''do_normalize''' ) )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : List[str] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__snake_case : Optional[int] = image_processor(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Any = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processor(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__snake_case : List[str] = image_processor(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 24 |
"""simple docstring"""
from collections.abc import Callable
def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float:
"""simple docstring"""
__snake_case : float = a
__snake_case : float = b
if function(_snake_case ) == 0: # one of the endpoints a or b is a root of the function
return a
elif function(_snake_case ) == 0:
return b
elif (
function(_snake_case ) * function(_snake_case ) > 0
): # if neither endpoint is a root and f(a) and f(b) have the same sign,
# then bisection cannot find a root in this interval
raise ValueError('''could not find root in given interval.''' )
else:
__snake_case : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the half-interval drops below 10^-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
__snake_case : List[str] = mid
else:
__snake_case : str = mid
__snake_case : str = start + (end - start) / 2.0
return mid
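# Hedged worked example with the cubic defined below: f(2) = -1 and f(3) = 16
# have opposite signs, so the loop above repeatedly halves the bracket [2, 3]
# until it is narrower than 10**-7, converging to the root x ~= 2.0945515.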
def lowercase ( _snake_case : float ) ->float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 24 | 1 |
"""simple docstring"""
def lowercase ( _snake_case : int = 600_851_475_143 ) ->int:
"""simple docstring"""
try:
__snake_case : int = int(_snake_case )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
__snake_case : Tuple = 1
__snake_case : str = 2
while i * i <= n:
while n % i == 0:
__snake_case : List[Any] = i
n //= i
i += 1
if n > 1:
__snake_case : Optional[int] = n
return int(_snake_case )
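# Hedged worked example: for n = 13195 the loop strips the prime factors
# 5, 7 and 13, leaving n = 29 > 1, so 29 is returned as the largest prime factor.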
if __name__ == "__main__":
print(F'{solution() = }')
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowercase ( _snake_case : str = "" ) ->dict[str, float]:
"""simple docstring"""
__snake_case : List[str] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
__snake_case : str = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
__snake_case : List[str] = soup.find_all('''td''' , attrs='''titleColumn''' )
__snake_case : Tuple = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_snake_case , _snake_case )
}
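# Hedged output sketch: the function above returns a title -> rating mapping,
# e.g. {"The Shawshank Redemption": 9.2, "The Godfather": 9.1, ...}
# (the concrete titles and ratings here are illustrative, not fetched).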
def lowercase ( _snake_case : str = "IMDb_Top_250_Movies.csv" ) ->None:
"""simple docstring"""
__snake_case : Dict = get_imdb_top_aaa_movies()
with open(_snake_case , '''w''' , newline='''''' ) as out_file:
__snake_case : Optional[Any] = csv.writer(_snake_case )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 24 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
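# Hedged usage sketch (the checkpoint name is an assumption; any repository
# bundling a CLIPImageProcessor with an XLM-R tokenizer would fit):
# processor = _UpperCAmelCase.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# # -> a BatchEncoding carrying input_ids, attention_mask and pixel_values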
| 24 | 1 |
"""simple docstring"""
import math
def lowercase ( _snake_case : float , _snake_case : float ) ->float:
"""simple docstring"""
return math.pow(_snake_case , 2 ) - a
def lowercase ( _snake_case : float ) ->float:
"""simple docstring"""
return 2 * x
def lowercase ( _snake_case : float ) ->float:
"""simple docstring"""
__snake_case : Optional[Any] = 2.0
while start <= a:
__snake_case : List[Any] = math.pow(_snake_case , 2 )
return start
def lowercase ( _snake_case : float , _snake_case : int = 9_999 , _snake_case : float = 0.00000000000001 ) ->float:
"""simple docstring"""
if a < 0:
raise ValueError('''math domain error''' )
__snake_case : Tuple = get_initial_point(_snake_case )
for _ in range(_snake_case ):
__snake_case : str = value
__snake_case : Dict = value - fx(_snake_case , _snake_case ) / fx_derivative(_snake_case )
if abs(prev_value - value ) < tolerance:
return value
return value
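# Hedged worked example: for a = 2, the initial-point helper returns 4.0 (the
# first repeated square exceeding a), and the Newton step
# x <- x - (x**2 - 2) / (2 * x) then gives
# 4.0 -> 2.25 -> 1.5694... -> 1.4218... -> 1.41421..., i.e. sqrt(2).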
if __name__ == "__main__":
from doctest import testmod
testmod()
| 24 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
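# Hedged usage sketch (checkpoint name taken from the pretrained map above;
# not executed here):
# tok = _UpperCAmelCase.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# ids = tok("UN Chief says there is no military solution in Syria").input_ids
# # source sequences end with [eos_token_id, <en_XX id>], because prefix_tokens
# # is empty and suffix_tokens is [eos, lang code] (see set_src_lang_special_tokens).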
| 24 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Whether tp freeze the encoder.'} )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCamelCase__ =field(
default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
lowerCamelCase__ =field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowerCamelCase__ =field(
default=128, metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowerCamelCase__ =field(
default=142, metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
}, )
lowerCamelCase__ =field(
default=142, metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowerCamelCase__ =field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
lowerCamelCase__ =field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
lowerCamelCase__ =field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Source language id for translation.'} )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Target language id for translation.'} )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': '# num_beams to use for evaluation.'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def lowercase ( _snake_case : Optional[Any] , _snake_case : str , _snake_case : Tuple ) ->Union[str, Any]:
"""simple docstring"""
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(_snake_case , os.path.join(_snake_case , f"""{split}_results.json""" ) )
def lowercase ( ) ->Optional[Any]:
"""simple docstring"""
__snake_case : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case : List[str] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(_snake_case , _snake_case , _snake_case ):
assert hasattr(_snake_case , _snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_snake_case , _snake_case , getattr(_snake_case , _snake_case ) )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=_snake_case , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_snake_case , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case : Any = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_snake_case , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_snake_case , _snake_case ):
__snake_case : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case : Any = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case : Union[str, Any] = SeqaSeqDataset
# Get datasets
__snake_case : Optional[int] = (
dataset_class(
_snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case : Any = (
dataset_class(
_snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case : str = (
dataset_class(
_snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case : List[str] = (
build_compute_metrics_fn(data_args.task , _snake_case ) if training_args.predict_with_generate else None
)
__snake_case : int = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , data_args=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , data_collator=SeqaSeqDataCollator(
_snake_case , _snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_snake_case , tokenizer=_snake_case , )
__snake_case : str = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case : Tuple = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case : Union[str, Any] = train_result.metrics
__snake_case : Optional[Any] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , _snake_case , training_args.output_dir )
all_metrics.update(_snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case : Tuple = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case : int = data_args.n_val
__snake_case : List[Any] = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , _snake_case , training_args.output_dir )
all_metrics.update(_snake_case )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case : str = trainer.predict(test_dataset=_snake_case , metric_key_prefix='''test''' )
__snake_case : Tuple = test_output.metrics
__snake_case : Tuple = data_args.n_test
if trainer.is_world_process_zero():
__snake_case : Dict = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , _snake_case , training_args.output_dir )
all_metrics.update(_snake_case )
if training_args.predict_with_generate:
__snake_case : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
__snake_case : Optional[int] = lmap(str.strip , _snake_case )
write_txt_file(_snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(_snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowercase ( _snake_case : Tuple ) ->List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
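# Hedged invocation sketch (the script file name, paths and hyper-parameters
# are placeholders; the flags map onto the dataclass fields defined above):
# python finetune_trainer.py --model_name_or_path facebook/mbart-large-cc25 \
#     --data_dir ./wmt_en_ro --output_dir ./out --task translation \
#     --src_lang en_XX --tgt_lang ro_RO --do_train --do_eval --predict_with_generate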
| 24 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : List[Any] = hans_processors[task]()
__snake_case : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Tuple = label_list[2], label_list[1]
__snake_case : Dict = label_list
__snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
__snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case : Union[str, Any] = tf.data.Dataset.from_generator(
a_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
__snake_case : Tuple = '''%s-%s''' % (set_type, line[0])
__snake_case : Dict = line[5]
__snake_case : int = line[6]
__snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case : List[Any] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )}
__snake_case : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , )
__snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0
__snake_case : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
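# Hedged usage sketch for the torch Dataset class above (paths and checkpoint
# are placeholders; positional args assumed as data_dir, tokenizer, task,
# max_seq_length, matching the cached-file name built in __init__):
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = _UpperCAmelCase("./hans_data", tokenizer, "hans", 128)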
| 24 | 1 |
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__snake_case : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__snake_case : List[str] = s - 2 * j
break
return diff
| 24 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ):
'''simple docstring'''
__snake_case : Any = vocab_size
__snake_case : str = max_position_embeddings
__snake_case : Any = d_model
__snake_case : List[str] = d_ff
__snake_case : Dict = d_ext
__snake_case : Optional[Any] = d_spout
__snake_case : int = num_switch_layers
__snake_case : List[Any] = num_ext_layers
__snake_case : Any = num_switch_layers + num_ext_layers
__snake_case : Optional[int] = num_heads
__snake_case : Tuple = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : Dict = dropout_rate
__snake_case : Optional[Any] = layer_norm_epsilon
__snake_case : Dict = router_bias
__snake_case : str = router_jitter_noise
__snake_case : List[str] = router_dtype
__snake_case : Union[str, Any] = router_ignore_padding_tokens
__snake_case : List[str] = output_hidden_states
__snake_case : Optional[Any] = output_attentions
__snake_case : Any = initializer_factor
__snake_case : int = output_router_logits
__snake_case : Union[str, Any] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
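# Hedged usage sketch: defaults mirror the checkpoint in the map above, and
# num_layers is derived as num_switch_layers + num_ext_layers (10 + 0 by
# default) rather than being passed directly:
# config = _UpperCAmelCase(d_model=1280, num_switch_layers=10, num_ext_layers=0)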
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =BlenderbotSmallConfig
lowerCamelCase__ ={}
lowerCamelCase__ ='gelu'
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=False , a_=99 , a_=32 , a_=2 , a_=4 , a_=37 , a_=0.1 , a_=0.1 , a_=20 , a_=2 , a_=1 , a_=0 , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : Optional[int] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : str = use_labels
__snake_case : str = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : List[str] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Union[str, Any] = eos_token_id
__snake_case : Dict = pad_token_id
__snake_case : List[str] = bos_token_id
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__snake_case : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__snake_case : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__snake_case : List[str] = prepare_blenderbot_small_inputs_dict(a_ , a_ , a_ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = TFBlenderbotSmallModel(config=a_ ).get_decoder()
__snake_case : Tuple = inputs_dict['''input_ids''']
__snake_case : Union[str, Any] = input_ids[:1, :]
__snake_case : Any = inputs_dict['''attention_mask'''][:1, :]
__snake_case : Optional[int] = inputs_dict['''head_mask''']
__snake_case : List[Any] = 1
# first forward pass
__snake_case : Any = model(a_ , attention_mask=a_ , head_mask=a_ , use_cache=a_ )
__snake_case , __snake_case : Union[str, Any] = outputs.to_tuple()
# create hypothetical next tokens and extend to next_input_ids
__snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
__snake_case : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
__snake_case : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__snake_case : Optional[int] = model(a_ , attention_mask=a_ )[0]
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , past_key_values=a_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__snake_case : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__snake_case : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
__snake_case : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a_ , a_ , rtol=1E-3 )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str]=None , _snake_case : List[str]=None , _snake_case : str=None , _snake_case : Tuple=None , _snake_case : List[str]=None , ) ->int:
"""simple docstring"""
if attention_mask is None:
__snake_case : Any = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__snake_case : int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__snake_case : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__snake_case : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowerCamelCase__ =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase__ =(
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = TFBlenderbotSmallModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a_ )
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =[
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
lowerCamelCase__ ='facebook/blenderbot_small-90M'
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.tokenizer(self.src_text , return_tensors='''tf''' )
__snake_case : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=a_ , )
__snake_case : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=a_ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 24 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : str = line.split()
__snake_case : Union[str, Any] = line_number
__snake_case : Dict = words[0]
__snake_case : str = value
return result
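# Hedged input sketch for the parser above: each non-empty line contributes
# its first whitespace-separated token, keyed by line number, so a file with
# the lines "happy", "sad", "angry" yields {0: "happy", 1: "sad", 2: "angry"}
# (consumed below as the id2label mapping for sequence classification).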
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : str = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : List[str] = shape_pointer.shape
# let's reduce dimension
__snake_case : int = value[0]
else:
__snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case : List[Any] = getattr(_snake_case , _snake_case )
__snake_case : int = value
else:
__snake_case : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : List[str] = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : Tuple = '''.'''.join([key, hf_param_name] )
else:
__snake_case : Optional[int] = key
__snake_case : List[Any] = value if '''lm_head''' in full_key else value[0]
SCREAMING_SNAKE_CASE : Tuple = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict:
"""simple docstring"""
__snake_case : Tuple = False
for key, mapped_key in MAPPING.items():
__snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case : int = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Tuple = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[str] = '''weight_v'''
elif "bias" in name:
__snake_case : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[Any] = '''weight'''
else:
__snake_case : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
return is_used
def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Union[str, Any] = fairseq_model.state_dict()
__snake_case : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Optional[int] = int(items[0] )
__snake_case : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
__snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case )
else:
__snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[int] = read_txt_into_dict(_snake_case )
__snake_case : List[Any] = idalabel
__snake_case : int = WavaVecaForSequenceClassification(_snake_case )
__snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : int = target_dict.bos_index
__snake_case : Tuple = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : Any = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Dict = 0
__snake_case : List[Any] = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
__snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False
__snake_case : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
__snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = WavaVecaForCTC(_snake_case )
else:
__snake_case : Tuple = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' )
__snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case )
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
__snake_case : int = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 24 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def lowercase ( _snake_case : str , _snake_case : str ) ->Any:
"""simple docstring"""
__snake_case : int = RobertaPreLayerNormConfig.from_pretrained(
_snake_case , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
__snake_case : Optional[int] = torch.load(hf_hub_download(repo_id=_snake_case , filename='''pytorch_model.bin''' ) )
__snake_case : Optional[int] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
__snake_case : Union[str, Any] = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
__snake_case : Tuple = tensor_value
__snake_case : List[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_snake_case , config=_snake_case , state_dict=_snake_case )
model.save_pretrained(_snake_case )
# convert tokenizer
__snake_case : int = AutoTokenizer.from_pretrained(_snake_case )
tokenizer.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 24 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 24 | 1 |
"""simple docstring"""
import os
from collections.abc import Iterator
def lowercase ( _snake_case : str = "." ) ->Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_snake_case ):
__snake_case : Optional[Any] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_snake_case )[1] in (".py", ".ipynb"):
yield os.path.join(_snake_case , _snake_case ).lstrip('''./''' )
def lowercase ( _snake_case : Tuple ) ->Tuple:
"""simple docstring"""
return f"""{i * ' '}*""" if i else "\n##"
def lowercase ( _snake_case : str , _snake_case : str ) ->str:
"""simple docstring"""
__snake_case : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_snake_case ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(_snake_case )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def lowercase ( _snake_case : str = "." ) ->None:
"""simple docstring"""
__snake_case : Union[str, Any] = ''''''
for filepath in sorted(good_file_paths(_snake_case ) ):
__snake_case , __snake_case : List[str] = os.path.split(_snake_case )
if filepath != old_path:
__snake_case : Dict = print_path(_snake_case , _snake_case )
__snake_case : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
__snake_case : List[Any] = f"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
__snake_case : Tuple = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f"""{md_prefix(_snake_case )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md(""".""")
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
"""simple docstring"""
def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float , ) ->float:
"""simple docstring"""
__snake_case : Optional[int] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be positive''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
__snake_case : Optional[Any] = 1 - (matter_density + radiation_density + dark_energy)
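        # Friedmann equation: H(z) = H0 * sqrt(Ω_r(1+z)^4 + Ω_m(1+z)^3 + Ω_k(1+z)^2 + Ω_Λ)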
__snake_case : List[str] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__snake_case : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE : Any = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 24 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
            # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
            # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24 | 1 |
"""simple docstring"""
from string import ascii_uppercase
SCREAMING_SNAKE_CASE : Union[str, Any] = {char: i for i, char in enumerate(ascii_uppercase)}
SCREAMING_SNAKE_CASE : Optional[Any] = dict(enumerate(ascii_uppercase))
def lowercase ( _snake_case : str , _snake_case : str ) ->str:
"""simple docstring"""
__snake_case : Dict = len(_snake_case )
__snake_case : List[Any] = 0
while True:
if x == i:
__snake_case : Dict = 0
if len(_snake_case ) == len(_snake_case ):
break
key += key[i]
i += 1
return key
def lowercase ( _snake_case : str , _snake_case : str ) ->str:
"""simple docstring"""
__snake_case : List[str] = ''''''
__snake_case : Tuple = 0
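    # Encrypt by subtracting the key letter's index from each message letter's index, modulo 26; spaces pass through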
for letter in message:
if letter == " ":
cipher_text += " "
else:
__snake_case : Union[str, Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def lowercase ( _snake_case : str , _snake_case : str ) ->str:
"""simple docstring"""
__snake_case : Optional[int] = ''''''
__snake_case : str = 0
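    # Decrypt by adding the key letter's index back; the extra +26 keeps the sum non-negative before the modulo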
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__snake_case : Union[str, Any] = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def lowercase ( ) ->None:
"""simple docstring"""
__snake_case : int = '''THE GERMAN ATTACK'''
__snake_case : Tuple = '''SECRET'''
__snake_case : int = generate_key(_snake_case , _snake_case )
__snake_case : Dict = cipher_text(_snake_case , _snake_case )
print(f"""Encrypted Text = {s}""" )
print(f"""Original Text = {original_text(_snake_case , _snake_case )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int ) ->str:
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
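    # bin() renders a negative value as '-0b...', so [3:] keeps only the binary digits
    # abs(n) - 2**length yields the low bits of the two's complement, which is then sign-prefixed with '1' and zero-padded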
__snake_case : Any = len(bin(_snake_case )[3:] )
__snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
__snake_case : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowercase ( _snake_case : Union[str, Any] ) ->int:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowercase ( _snake_case : Any , _snake_case : int , _snake_case : int ) ->Any:
"""simple docstring"""
return max(metric_fn(_snake_case , _snake_case ) for gt in ground_truths )
def lowercase ( _snake_case : Any , _snake_case : Tuple , _snake_case : Any ) ->int:
"""simple docstring"""
__snake_case : Union[str, Any] = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
__snake_case : Dict = []
if args.gold_data_mode == "qa":
__snake_case : Optional[int] = pd.read_csv(_snake_case , sep='''\t''' , header=_snake_case )
for answer_list in data[1]:
__snake_case : Union[str, Any] = ast.literal_eval(_snake_case )
answers.append(_snake_case )
else:
__snake_case : Optional[Any] = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
__snake_case : Optional[int] = [[reference] for reference in references]
__snake_case : Tuple = 0
for prediction, ground_truths in zip(_snake_case , _snake_case ):
total += 1
em += metric_max_over_ground_truths(_snake_case , _snake_case , _snake_case )
fa += metric_max_over_ground_truths(_snake_case , _snake_case , _snake_case )
__snake_case : Tuple = 100.0 * em / total
__snake_case : List[Any] = 100.0 * fa / total
logger.info(f"""F1: {fa:.2f}""" )
logger.info(f"""EM: {em:.2f}""" )
def lowercase ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = args.k
__snake_case : Optional[Any] = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
__snake_case : int = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
__snake_case : Tuple = 0
for hypo, reference in zip(_snake_case , _snake_case ):
__snake_case : Dict = set(hypo.split('''\t''' )[:k] )
__snake_case : Optional[Any] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__snake_case : Optional[Any] = 100.0 * em / total
logger.info(f"""Precision@{k}: {em: .2f}""" )
def lowercase ( _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Union[str, Any] ) ->List[str]:
"""simple docstring"""
def strip_title(_snake_case : Optional[Any] ):
if title.startswith('''"''' ):
__snake_case : List[Any] = title[1:]
if title.endswith('''"''' ):
__snake_case : Union[str, Any] = title[:-1]
return title
__snake_case : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_snake_case , return_tensors='''pt''' , padding=_snake_case , truncation=_snake_case , )['''input_ids'''].to(args.device )
__snake_case : int = rag_model.rag.question_encoder(_snake_case )
__snake_case : Optional[int] = question_enc_outputs[0]
__snake_case : Optional[int] = rag_model.retriever(
_snake_case , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
__snake_case : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__snake_case : List[Any] = []
for docs in all_docs:
__snake_case : List[str] = [strip_title(_snake_case ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_snake_case ) )
return provenance_strings
def lowercase ( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : str ) ->Tuple:
"""simple docstring"""
with torch.no_grad():
__snake_case : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_snake_case , return_tensors='''pt''' , padding=_snake_case , truncation=_snake_case )
__snake_case : Union[str, Any] = inputs_dict.input_ids.to(args.device )
__snake_case : int = inputs_dict.attention_mask.to(args.device )
__snake_case : Tuple = rag_model.generate( # rag_model overwrites generate
_snake_case , attention_mask=_snake_case , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_snake_case , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__snake_case : Any = rag_model.retriever.generator_tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
if args.print_predictions:
for q, a in zip(_snake_case , _snake_case ):
logger.info('''Q: {} - A: {}'''.format(_snake_case , _snake_case ) )
return answers
def lowercase ( ) ->int:
"""simple docstring"""
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_snake_case , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_snake_case , choices=['''exact''', '''compressed''', '''legacy'''] , type=_snake_case , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_snake_case , type=_snake_case , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_snake_case , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_snake_case , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_snake_case , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_snake_case , choices=['''qa''', '''ans'''] , help=(
        '''Format of the gold data file. '''
        '''qa - a single line in the following format: question [tab] answer_list. '''
        '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_snake_case , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_snake_case , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_snake_case , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_snake_case , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_snake_case , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
__snake_case : Any = parser.parse_args()
__snake_case : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def lowercase ( _snake_case : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = {}
if args.model_type is None:
__snake_case : Any = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
__snake_case : int = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
__snake_case : Optional[Any] = args.n_docs
if args.index_name is not None:
__snake_case : Tuple = args.index_name
if args.index_path is not None:
__snake_case : str = args.index_path
else:
__snake_case : Optional[Any] = BartForConditionalGeneration
__snake_case : Any = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _snake_case )
__snake_case : Optional[int] = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
__snake_case : Optional[Any] = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_snake_case , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_snake_case ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
__snake_case : int = RagRetriever.from_pretrained(_snake_case , **_snake_case )
__snake_case : str = model_class.from_pretrained(_snake_case , retriever=_snake_case , **_snake_case )
model.retriever.init_retrieval()
else:
__snake_case : List[str] = model_class.from_pretrained(_snake_case , **_snake_case )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
__snake_case : Dict = []
for line in tqdm(_snake_case ):
questions.append(line.strip() )
if len(_snake_case ) == args.eval_batch_size:
__snake_case : Tuple = evaluate_batch_fn(_snake_case , _snake_case , _snake_case )
preds_file.write('''\n'''.join(_snake_case ) + '''\n''' )
preds_file.flush()
__snake_case : Dict = []
if len(_snake_case ) > 0:
__snake_case : Optional[Any] = evaluate_batch_fn(_snake_case , _snake_case , _snake_case )
preds_file.write('''\n'''.join(_snake_case ) )
preds_file.flush()
score_fn(_snake_case , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = get_args()
main(args)
| 24 |
"""simple docstring"""
def lowercase ( ) ->int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
for b in range(_snake_case , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def lowercase ( _snake_case : SplitDict ) ->Optional[int]:
"""simple docstring"""
__snake_case : str = split_dict._to_yaml_list()
assert len(_snake_case ) == len(_snake_case )
__snake_case : Optional[Any] = SplitDict._from_yaml_list(_snake_case )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
__snake_case : Any = None
# the split name of split_dict takes over the name of the split info object
__snake_case : int = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=_snake_case ), SplitInfo(dataset_name='''my_dataset''' )] )
def lowercase ( _snake_case : Optional[Any] ) ->int:
"""simple docstring"""
__snake_case : Tuple = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int = 100 ) ->int:
"""simple docstring"""
__snake_case : str = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : list[int] ) ->list[int]: # This function is recursive
"""simple docstring"""
__snake_case : int = len(_snake_case )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__snake_case : Optional[Any] = array[0]
__snake_case : Optional[Any] = False
__snake_case : List[Any] = 1
__snake_case : list[int] = []
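    # Recurse at the first element smaller than the pivot, then compare against the best subsequence that keeps the pivot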
while not is_found and i < array_length:
if array[i] < pivot:
__snake_case : Optional[Any] = True
__snake_case : List[str] = [element for element in array[i:] if element >= array[i]]
__snake_case : Dict = longest_subsequence(_snake_case )
if len(_snake_case ) > len(_snake_case ):
__snake_case : List[Any] = temp_array
else:
i += 1
__snake_case : Union[str, Any] = [element for element in array[1:] if element >= pivot]
__snake_case : str = [pivot, *longest_subsequence(_snake_case )]
if len(_snake_case ) > len(_snake_case ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
| 24 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def lowercase ( _snake_case : List[Any] , _snake_case : List[str]=None , _snake_case : List[Any]=None , _snake_case : Optional[Any]=None ) ->str:
"""simple docstring"""
__snake_case : List[Any] = True
while ask_again:
__snake_case : str = input(_snake_case )
try:
if default is not None and len(_snake_case ) == 0:
return default
return convert_value(_snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_snake_case )
def lowercase ( _snake_case : int , _snake_case : List[Any]=[] , _snake_case : str=None , _snake_case : Optional[Any]=0 ) ->str:
"""simple docstring"""
__snake_case : Union[str, Any] = BulletMenu(_snake_case , _snake_case )
__snake_case : List[str] = menu.run(default_choice=_snake_case )
return convert_value(_snake_case ) if convert_value is not None else result
def lowercase ( _snake_case : Dict ) ->List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = int(_snake_case )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def lowercase ( _snake_case : Any ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = int(_snake_case )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def lowercase ( _snake_case : List[Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Optional[int] = int(_snake_case )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase ( _snake_case : Tuple ) ->List[Any]:
"""simple docstring"""
__snake_case : Any = int(_snake_case )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def lowercase ( _snake_case : str ) ->Optional[int]:
"""simple docstring"""
__snake_case : Any = int(_snake_case )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class _UpperCAmelCase ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = super()._format_usage(a_ , a_ , a_ , a_ )
__snake_case : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__snake_case : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = '''sshleifer/tiny-gpt2'''
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''sgugger/tiny-distilbert-classification'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , )
__snake_case : Optional[Any] = TensorFlowBenchmark(a_ )
__snake_case : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Any = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ )
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Dict = TensorFlowBenchmark(a_ , [config] )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : int = TensorFlowBenchmark(a_ )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
__snake_case : Tuple = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] )
__snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(a_ )
benchmark.run()
self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(a_ ):
self.assertTrue(hasattr(a_ , '''sequential''' ) )
self.assertTrue(hasattr(a_ , '''cumulative''' ) )
self.assertTrue(hasattr(a_ , '''current''' ) )
self.assertTrue(hasattr(a_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ )
__snake_case : Optional[int] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
| 24 | 1 |
"""simple docstring"""
from collections.abc import Callable
def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float:
"""simple docstring"""
__snake_case : float = a
__snake_case : float = b
    if function(_snake_case ) == 0: # a or b is itself a root of the function
return a
elif function(_snake_case ) == 0:
return b
    elif (
        function(_snake_case ) * function(_snake_case ) > 0
    ): # if neither bound is a root and f(a) and f(b) share the same sign,
        # the interval does not bracket a root, so bisection cannot find one
        raise ValueError('''could not find root in given interval.''' )
else:
__snake_case : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # iterate until the interval half-width drops below 1e-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
__snake_case : List[str] = mid
else:
__snake_case : str = mid
__snake_case : str = start + (end - start) / 2.0
return mid
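# Hedged note: both helpers in this dump are emitted under the name ``lowercase``
# (originally ``bisection`` and ``f``). For f(x) = x**3 - 2*x - 5, bisection over
# [1, 1000] converges to the real root near x = ~2.0945515.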
def lowercase ( _snake_case : float ) ->float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def lowercase ( ) ->str:
"""simple docstring"""
global _logger
__snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = lock_file
return None
def __str__(self ):
'''simple docstring'''
__snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.lock.release()
return None
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ )
# The path to the lock file.
__snake_case : str = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__snake_case : Dict = None
# The default timeout value.
__snake_case : List[Any] = timeout
# We use this lock primarily for the lock counter.
__snake_case : Tuple = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__snake_case : Optional[Any] = 0
return None
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = float(a_ )
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ):
'''simple docstring'''
if timeout is None:
__snake_case : List[str] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__snake_case : Optional[int] = id(self )
__snake_case : str = self._lock_file
__snake_case : Optional[int] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(a_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__snake_case : Optional[int] = max(0 , self._lock_counter - 1 )
raise
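        # Returning a proxy object (instead of ``self``) lets callers write
        # ``with lock.acquire(timeout=...):`` so the lock is released when the
        # block exits, while a bare ``lock.acquire()`` call keeps working.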
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__snake_case : Tuple = id(self )
__snake_case : str = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__snake_case : Dict = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
def __del__(self ):
'''simple docstring'''
self.release(force=a_ )
return None
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = os.path.basename(a_ )
if len(a_ ) > max_length and max_length > 0:
__snake_case : List[Any] = os.path.dirname(a_ )
__snake_case : Any = str(hash(a_ ) )
__snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(a_ , a_ )
else:
return path
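# Minimal usage sketch (hedged: assumes the original ``FileLock`` alias, which
# this dump re-emits as ``SCREAMING_SNAKE_CASE`` near the bottom of the module):
#
#     lock = FileLock("resource.txt.lock", timeout=5)
#     with lock:
#         ...  # exclusive access to resource.txt while the lock is held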
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
__snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__snake_case : Any = os.open(self._lock_file , a_ )
except OSError:
pass
else:
try:
msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(a_ )
else:
__snake_case : Dict = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Dict = None
msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 )
os.close(a_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__snake_case : List[str] = os.open(self._lock_file , a_ )
try:
fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(a_ )
else:
__snake_case : Optional[int] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Tuple = None
fcntl.flock(a_ , fcntl.LOCK_UN )
os.close(a_ )
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__snake_case : Tuple = os.open(self._lock_file , a_ )
except OSError:
pass
else:
__snake_case : List[Any] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
os.close(self._lock_file_fd )
__snake_case : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
SCREAMING_SNAKE_CASE : Dict = None
if msvcrt:
SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock
elif fcntl:
SCREAMING_SNAKE_CASE : List[str] = UnixFileLock
else:
SCREAMING_SNAKE_CASE : str = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 24 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE : str = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def lowercase ( _snake_case : List[Any] ) ->List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE : str = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def lowercase ( _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Any = list(s_dict.keys() )
for key in keys:
__snake_case : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__snake_case : Any = new_key.replace(_snake_case , _snake_case )
print(f"""{key} -> {new_key}""" )
__snake_case : str = s_dict.pop(_snake_case )
return s_dict
def lowercase ( _snake_case : int ) ->str:
"""simple docstring"""
__snake_case , __snake_case : int = emb.weight.shape
__snake_case : List[str] = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
__snake_case : Any = emb.weight.data
return lin_layer
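# The helper above ties the LM head to the decoder's token-embedding matrix:
# the new ``nn.Linear`` reuses ``emb.weight.data`` rather than learning a
# separate output projection.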
def lowercase ( _snake_case : str , _snake_case : str ) ->bytes:
"""simple docstring"""
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : List[str] = os.path.basename(_snake_case )
__snake_case : str = url.split('''/''' )[-2]
__snake_case : Optional[int] = os.path.join(_snake_case , _snake_case )
if os.path.exists(_snake_case ) and not os.path.isfile(_snake_case ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(_snake_case ):
__snake_case : List[Any] = open(_snake_case , '''rb''' ).read()
if hashlib.shaaaa(_snake_case ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(_snake_case ) as source, open(_snake_case , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=_snake_case , unit_divisor=1_024 ) as loop:
while True:
__snake_case : Union[str, Any] = source.read(8_192 )
if not buffer:
break
output.write(_snake_case )
loop.update(len(_snake_case ) )
__snake_case : List[Any] = open(_snake_case , '''rb''' ).read()
if hashlib.shaaaa(_snake_case ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
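# Note: the download helper above validates the SHA-256 digest that is embedded
# as the second-to-last path segment of each model URL, both when reusing a
# cached file and after a fresh download.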
def lowercase ( _snake_case : Any , _snake_case : int ) ->str:
"""simple docstring"""
if ".pt" not in checkpoint_path:
__snake_case : Any = _download(_MODELS[checkpoint_path] )
else:
__snake_case : Optional[Any] = torch.load(_snake_case , map_location='''cpu''' )
__snake_case : int = original_checkpoint['''dims''']
__snake_case : List[Any] = original_checkpoint['''model_state_dict''']
__snake_case : List[str] = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(_snake_case )
rename_keys(_snake_case )
__snake_case : Optional[int] = True
__snake_case : int = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
__snake_case : List[Any] = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_snake_case , decoder_ffn_dim=_snake_case , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
__snake_case : int = WhisperForConditionalGeneration(_snake_case )
__snake_case , __snake_case : List[Any] = model.model.load_state_dict(_snake_case , strict=_snake_case )
if len(_snake_case ) > 0 and not set(_snake_case ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
__snake_case : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__snake_case : List[str] = proj_out_weights
model.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 24 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Dict = seq_length
__snake_case : List[str] = is_training
__snake_case : List[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = scope
__snake_case : Any = range_bbox
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure each bbox is legal, i.e. (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[str] = bbox[i, j, 3]
__snake_case : Any = bbox[i, j, 1]
__snake_case : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : List[str] = bbox[i, j, 2]
__snake_case : Union[str, Any] = bbox[i, j, 0]
__snake_case : Dict = t
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[str] = None
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ )
__snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ )
__snake_case : List[str] = model(a_ , bbox=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : List[str] = LiltForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs
__snake_case : Any = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModelTester(self )
__snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Dict = type
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = LiltModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ )
__snake_case : Dict = torch.tensor([[1, 2]] , device=a_ )
__snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ )
__snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] )
__snake_case : str = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , )
        self.assertEqual(outputs.last_hidden_state.shape , a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE : Union[str, Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
SCREAMING_SNAKE_CASE : List[str] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase ( _snake_case : list[float] ) ->list[float]:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = len(_snake_case )
for i in range(_snake_case ):
__snake_case : float = -1
for j in range(i + 1 , _snake_case ):
if arr[i] < arr[j]:
__snake_case : List[Any] = arr[j]
break
result.append(_snake_case )
return result
def lowercase ( _snake_case : list[float] ) ->list[float]:
"""simple docstring"""
__snake_case : Tuple = []
for i, outer in enumerate(_snake_case ):
__snake_case : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
__snake_case : Optional[int] = inner
break
result.append(_snake_case )
return result
def lowercase ( _snake_case : list[float] ) ->list[float]:
"""simple docstring"""
__snake_case : Any = len(_snake_case )
__snake_case : list[float] = []
__snake_case : list[float] = [-1] * arr_size
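    # Scan from the right with a stack of candidates kept in decreasing order
    # from bottom to top: after popping every element <= arr[index], the
    # remaining top (if any) is the next greater element. Each value is pushed
    # and popped at most once, so the whole pass is O(n).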
for index in reversed(range(_snake_case ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__snake_case : int = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
SCREAMING_SNAKE_CASE : Tuple = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(a_ , attention_mask=a_ )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
| 24 | 1 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
SCREAMING_SNAKE_CASE : Optional[int] = datasets.load_iris()
SCREAMING_SNAKE_CASE : int = np.array(data["""data"""])
SCREAMING_SNAKE_CASE : Tuple = np.array(data["""target"""])
SCREAMING_SNAKE_CASE : Optional[Any] = data["""target_names"""]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = train_test_split(X, y)
def lowercase ( _snake_case : str , _snake_case : Optional[int] ) ->str:
"""simple docstring"""
return np.linalg.norm(np.array(_snake_case ) - np.array(_snake_case ) )
def lowercase ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Union[str, Any]=5 ) ->Any:
"""simple docstring"""
__snake_case : int = zip(_snake_case , _snake_case )
# List of distances of all points from the point to be classified
__snake_case : List[str] = []
for data_point in data:
__snake_case : List[str] = euclidean_distance(data_point[0] , _snake_case )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__snake_case : str = [i[1] for i in sorted(_snake_case )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__snake_case : Dict = Counter(_snake_case ).most_common(1 )[0][0]
return classes[result]
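# The classifier above is brute-force k-NN: one distance computation per
# training point, a sort to pick the k nearest neighbours, and a majority vote
# (via ``Counter``) over their labels.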
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 24 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]:
"""simple docstring"""
def get_masked_lm_array(_snake_case : str ):
__snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : str = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Any = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_array(_snake_case : str ):
__snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_layer_array(_snake_case : int , _snake_case : str ):
__snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ):
__snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case )
__snake_case : int = array.reshape(_snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
print(f"""Loading model based on config from {config_path}...""" )
__snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case )
__snake_case : Dict = BertForMaskedLM(_snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowercase ( ) ->Dict:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case : int = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , _snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
assert _test_patching.open is open
__snake_case : str = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , _snake_case ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowercase ( ) ->List[str]:
"""simple docstring"""
__snake_case : Tuple = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , _snake_case ):
pass
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , _snake_case ) is None
with patch_submodule(_test_patching , '''len''' , _snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = '''__test_patch_submodule_start_and_stop_mock__'''
__snake_case : Optional[int] = patch_submodule(_test_patching , '''open''' , _snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowercase ( ) ->Any:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__snake_case : Dict = '''__test_patch_submodule_successive_join__'''
__snake_case : Optional[Any] = '''__test_patch_submodule_successive_dirname__'''
__snake_case : Dict = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , _snake_case ):
with patch_submodule(_test_patching , '''os.rename''' , _snake_case ):
with patch_submodule(_test_patching , '''os.path.dirname''' , _snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , _snake_case ):
with patch_submodule(_test_patching , '''os.path.join''' , _snake_case ):
with patch_submodule(_test_patching , '''os.path.dirname''' , _snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowercase ( ) ->Optional[Any]:
"""simple docstring"""
__snake_case : str = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , _snake_case ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , _snake_case ):
pass
| 24 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ):
'''simple docstring'''
super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ )
__snake_case : Union[str, Any] = Sql(
cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = None
__snake_case : Dict = None
__snake_case : Dict = None
__snake_case : List[str] = None
self.builder.download_and_prepare(
download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , )
# Build dataset for splits
__snake_case : Any = self.builder.as_dataset(
split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__snake_case : List[str] = dataset
__snake_case : Tuple = name
__snake_case : Optional[int] = con
__snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case : Dict = num_proc
__snake_case : Dict = to_sql_kwargs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ )
__snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ )
__snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ )
__snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case : Optional[Any] = args
__snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__snake_case : Dict = query_table(
table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : Tuple = batch.to_pandas()
__snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ )
return num_rows or len(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
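# Minimal usage sketch (hedged: assumes the public ``Dataset.to_sql`` wrapper
# around the writer class above and a SQLAlchemy-style connection URI):
#
#     Dataset.from_dict({"a": [1, 2, 3]}).to_sql("my_table", "sqlite:///my.db")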
| 24 | 1 |
"""simple docstring"""
def lowercase ( _snake_case : int = 4_000_000 ) ->int:
"""simple docstring"""
__snake_case : List[str] = [0, 1]
__snake_case : List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
__snake_case : Union[str, Any] = 0
for j in range(len(_snake_case ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
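# Worked example for this implementation: solution(10) builds the sequence
# 0, 1, 1, 2, 3, 5, 8, 13 and sums the even terms it visits (0 + 2 + 8),
# returning 10.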
if __name__ == "__main__":
print(F'{solution() = }')
| 24 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='lxmert'
lowerCamelCase__ ={}
def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : List[Any] = num_attention_heads
__snake_case : int = hidden_act
__snake_case : int = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : str = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = num_qa_labels
__snake_case : int = num_object_labels
__snake_case : Optional[Any] = num_attr_labels
__snake_case : Union[str, Any] = l_layers
__snake_case : Optional[int] = x_layers
__snake_case : Optional[int] = r_layers
__snake_case : Tuple = visual_feat_dim
__snake_case : Optional[int] = visual_pos_dim
__snake_case : Dict = visual_loss_normalizer
__snake_case : str = task_matched
__snake_case : Optional[Any] = task_mask_lm
__snake_case : List[str] = task_obj_predict
__snake_case : Optional[Any] = task_qa
__snake_case : Any = visual_obj_loss
__snake_case : int = visual_attr_loss
__snake_case : List[Any] = visual_feat_loss
__snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**a_ )
| 24 | 1 |
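# A minimal instantiation sketch for the config above, assuming the public
# transformers API (`LxmertConfig`/`LxmertModel` are the real class names;
# no checkpoint download is needed for a randomly initialised model):
from transformers import LxmertConfig, LxmertModel

config = LxmertConfig(l_layers=2, x_layers=1, r_layers=2)  # smaller than the 9/5/5 defaults
model = LxmertModel(config)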
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE : Any = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =CodeGenTokenizer
def __init__(self , a_=None , a_=None , a_=None , a_="<|endoftext|>" , a_="<|endoftext|>" , a_="<|endoftext|>" , a_=False , **a_ , ):
'''simple docstring'''
super().__init__(
a_ , a_ , tokenizer_file=a_ , unk_token=a_ , bos_token=a_ , eos_token=a_ , add_prefix_space=a_ , **a_ , )
if kwargs.pop('''add_bos_token''' , a_ ):
__snake_case : Any = kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
                '''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
                f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
                '''This issue will be fixed soon (see https://github.com/huggingface/tokenizers/pull/1005) '''
                '''so that the fast tokenizer works correctly.''' )
__snake_case : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , a_ ) != add_prefix_space:
__snake_case : List[Any] = getattr(a_ , pre_tok_state.pop('''type''' ) )
__snake_case : int = add_prefix_space
__snake_case : int = pre_tok_class(**a_ )
__snake_case : Union[str, Any] = add_prefix_space
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = kwargs.get('''is_split_into_words''' , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
__snake_case : Optional[int] = kwargs.get('''is_split_into_words''' , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Union[str, Any] = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
__snake_case : int = super().decode(
token_ids=a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ , **a_ , )
if truncate_before_pattern is not None and len(a_ ) > 0:
__snake_case : List[Any] = self.truncate(a_ , a_ )
return decoded_text
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
def find_re(a_ , a_ , a_ ):
__snake_case : List[str] = pattern.search(a_ , a_ )
return m.start() if m else -1
__snake_case : Optional[int] = [re.compile(a_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__snake_case : Any = list(re.finditer('''^print''' , a_ , re.MULTILINE ) )
if len(a_ ) > 1:
__snake_case : List[str] = completion[: prints[1].start()]
__snake_case : Dict = list(re.finditer('''^def''' , a_ , re.MULTILINE ) )
if len(a_ ) > 1:
__snake_case : Optional[Any] = completion[: defs[1].start()]
__snake_case : Dict = 0
__snake_case : List[Any] = [
pos for pos in [find_re(a_ , a_ , a_ ) for terminal in terminals] if pos != -1
]
if len(a_ ) > 0:
return completion[: min(a_ )]
else:
return completion
| 24 |
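# A sketch of the `truncate_before_pattern` hook implemented above
# (requires downloading the real Salesforce/codegen-350M-mono checkpoint;
# the sample string is illustrative): decoding cuts at the first match of
# any supplied regex, after also trimming at a second top-level def/print.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
text = "def add(a, b):\n    return a + b\n\n\nprint(add(1, 2))"
ids = tok(text)["input_ids"]
print(tok.decode(ids, truncate_before_pattern=[r"\n\n\n"]))
# keeps only: def add(a, b):\n    return a + b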
"""simple docstring"""
def find_min(arr: list) -> int:
    """Return the smallest possible difference between the sums of two
    subsets that partition `arr` (classic subset-sum DP)."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    # base case: the empty subset sums to 0 for every prefix, including i == 0
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # exclude arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # include it
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
| 24 | 1 |
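# Quick checks for find_min above (a sketch): {1, 6, 11, 5} splits into
# {1, 5, 6} and {11}, so the best achievable difference is 1.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([1, 2, 3, 9]) == 3  # {1, 2, 3} vs {9}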
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
SCREAMING_SNAKE_CASE : Optional[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : str = direct_transformers_import(PATH_TO_TRANSFORMERS)
SCREAMING_SNAKE_CASE : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SCREAMING_SNAKE_CASE : int = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (even though we don't have a training script for these models yet)
    """Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowercase ( _snake_case : List[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : List[Any] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
__snake_case : List[str] = True
# Deal with multi-line cases
elif (
re.search(
rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , _snake_case , )
is not None
):
__snake_case : Optional[Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__snake_case : List[str] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__snake_case : Optional[int] = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
__snake_case : Tuple = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
__snake_case : Union[str, Any] = True
if not attribute_used:
__snake_case : List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__snake_case : str = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__snake_case : str = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__snake_case : Optional[int] = True
elif attribute.endswith('''_token_id''' ):
__snake_case : Dict = True
# configuration class specific cases
if not case_allowed:
__snake_case : Optional[Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__snake_case : Tuple = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase ( _snake_case : str ) ->int:
"""simple docstring"""
__snake_case : Union[str, Any] = dict(inspect.signature(config_class.__init__ ).parameters )
__snake_case : List[Any] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
__snake_case : Optional[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__snake_case : Any = {}
if len(config_class.attribute_map ) > 0:
__snake_case : Dict = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__snake_case : Optional[int] = inspect.getsourcefile(_snake_case )
__snake_case : Union[str, Any] = os.path.dirname(_snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__snake_case : Dict = [os.path.join(_snake_case , _snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith('''modeling_''' )]
# Get the source code strings
__snake_case : str = []
for path in modeling_paths:
if os.path.isfile(_snake_case ):
with open(_snake_case ) as fp:
modeling_sources.append(fp.read() )
__snake_case : Any = []
for config_param, default_value in zip(_snake_case , _snake_case ):
# `attributes` here is all the variant names for `config_param`
__snake_case : List[str] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_snake_case , _snake_case , _snake_case , _snake_case ):
unused_attributes.append(attributes[0] )
return sorted(_snake_case )
def lowercase ( ) ->Any:
"""simple docstring"""
__snake_case : Tuple = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__snake_case : Union[str, Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _snake_case : inspect.isclass(_snake_case )
and issubclass(_snake_case , _snake_case )
and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__snake_case : str = check_config_attributes_being_used(_snake_case )
if len(_snake_case ) > 0:
__snake_case : Optional[int] = unused_attributes
if len(_snake_case ) > 0:
__snake_case : List[Any] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f"""{name}: {attributes}\n"""
raise ValueError(_snake_case )
if __name__ == "__main__":
check_config_attributes()
| 24 |
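# One-line distillation of the check above (a sketch): an attribute counts
# as "used" when `config.<attr>` or a getattr variant appears in at least
# one modeling_*.py source string; anything else must be allow-listed.
modeling_source = "hidden_states = config.hidden_size * 2"
assert "config.hidden_size" in modeling_source      # used -> not reported
assert "config.unused_attr" not in modeling_source  # would be reported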
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is already a root
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # f(a) and f(b) have the same sign, so no sign change is bracketed
        # and this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is ~10^-7 wide
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 24 | 1 |
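# Usage sketch: f(x) = x^3 - 2x - 5 changes sign on [1, 1000], and its
# real root is near 2.0945515, so bisection converges to it.
root = bisection(f, 1, 1000)
assert abs(root**3 - 2 * root - 5) < 1e-5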
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple XOR cipher; falls back to this key when a call passes 0."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 24 |
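# Round-trip sketch for XORCipher above, mirroring the commented tests
# (key 67): XOR with the same key is its own inverse.
crypt = XORCipher()
secret = crypt.encrypt_string("hallo welt", 67)
assert crypt.decrypt_string(secret, 67) == "hallo welt"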
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 | 1 |
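# The _LazyModule pattern above keeps `import transformers` cheap: the
# torch-backed classes are resolved only on first attribute access. A
# sketch, assuming the public transformers API:
from transformers import LukeConfig  # configuration has no torch dependency

config = LukeConfig()
# `from transformers import LukeModel` would pull in the modeling file and
# raise only then if torch were missing.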
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """Insert a new node at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding the two values."""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 24 |
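# Edge cases for swap_nodes above (a sketch): values that are missing or
# identical leave the list untouched, since swapping exchanges only the
# `data` payloads, never the links.
lst = LinkedList()
for value in (3, 2, 1):
    lst.push(value)
lst.swap_nodes(1, 9)  # 9 is absent -> no-op
lst.swap_nodes(2, 2)  # identical values -> no-op
lst.print_list()      # still: 1 2 3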
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 24 | 1 |
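# Typical round trip for the processor above (a sketch; BAAI/AltCLIP is
# the real AltCLIP checkpoint whose processor pairs a CLIP image processor
# with an XLM-R tokenizer, and fetching it requires network access):
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.new("RGB", (224, 224), "white")
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']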
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_=0.0 , a_ = None , a_ = "geglu" , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = "layer_norm" , a_ = False , ):
'''simple docstring'''
super().__init__()
__snake_case : List[Any] = only_cross_attention
__snake_case : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__snake_case : Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__snake_case : List[Any] = AdaLayerNorm(a_ , a_ )
elif self.use_ada_layer_norm_zero:
__snake_case : List[str] = AdaLayerNormZero(a_ , a_ )
else:
__snake_case : List[str] = nn.LayerNorm(a_ , elementwise_affine=a_ )
__snake_case : List[Any] = Attention(
query_dim=a_ , heads=a_ , dim_head=a_ , dropout=a_ , bias=a_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__snake_case : Union[str, Any] = (
AdaLayerNorm(a_ , a_ )
if self.use_ada_layer_norm
else nn.LayerNorm(a_ , elementwise_affine=a_ )
)
__snake_case : Union[str, Any] = Attention(
query_dim=a_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a_ , dim_head=a_ , dropout=a_ , bias=a_ , upcast_attention=a_ , ) # is self-attn if encoder_hidden_states is none
else:
__snake_case : Dict = None
__snake_case : Optional[int] = None
# 3. Feed-forward
__snake_case : Optional[Any] = nn.LayerNorm(a_ , elementwise_affine=a_ )
__snake_case : Union[str, Any] = FeedForward(a_ , dropout=a_ , activation_fn=a_ , final_dropout=a_ )
# let chunk size default to None
__snake_case : Union[str, Any] = None
__snake_case : Any = 0
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : int = chunk_size
__snake_case : Any = dim
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
__snake_case : str = self.norma(a_ , a_ )
elif self.use_ada_layer_norm_zero:
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = self.norma(
a_ , a_ , a_ , hidden_dtype=hidden_states.dtype )
else:
__snake_case : Dict = self.norma(a_ )
__snake_case : Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__snake_case : Union[str, Any] = self.attna(
a_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a_ , **a_ , )
if self.use_ada_layer_norm_zero:
__snake_case : Tuple = gate_msa.unsqueeze(1 ) * attn_output
__snake_case : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__snake_case : List[str] = (
self.norma(a_ , a_ ) if self.use_ada_layer_norm else self.norma(a_ )
)
__snake_case : Tuple = self.attna(
a_ , encoder_hidden_states=a_ , attention_mask=a_ , **a_ , )
__snake_case : str = attn_output + hidden_states
# 3. Feed-forward
__snake_case : Union[str, Any] = self.norma(a_ )
if self.use_ada_layer_norm_zero:
__snake_case : List[str] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
__snake_case : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__snake_case : Dict = torch.cat(
[self.ff(a_ ) for hid_slice in norm_hidden_states.chunk(a_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__snake_case : str = self.ff(a_ )
if self.use_ada_layer_norm_zero:
__snake_case : Dict = gate_mlp.unsqueeze(1 ) * ff_output
__snake_case : List[str] = ff_output + hidden_states
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ = None , a_ = 4 , a_ = 0.0 , a_ = "geglu" , a_ = False , ):
'''simple docstring'''
super().__init__()
__snake_case : Dict = int(dim * mult )
__snake_case : Union[str, Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__snake_case : Optional[int] = GELU(a_ , a_ )
if activation_fn == "gelu-approximate":
__snake_case : int = GELU(a_ , a_ , approximate='''tanh''' )
elif activation_fn == "geglu":
__snake_case : List[Any] = GEGLU(a_ , a_ )
elif activation_fn == "geglu-approximate":
__snake_case : Dict = ApproximateGELU(a_ , a_ )
__snake_case : Any = nn.ModuleList([] )
# project in
self.net.append(a_ )
# project dropout
self.net.append(nn.Dropout(a_ ) )
# project out
self.net.append(nn.Linear(a_ , a_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(a_ ) )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for module in self.net:
__snake_case : str = module(a_ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ = "none" ):
'''simple docstring'''
super().__init__()
__snake_case : Union[str, Any] = nn.Linear(a_ , a_ )
__snake_case : Tuple = approximate
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(a_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.proj(a_ )
__snake_case : str = self.gelu(a_ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ ):
'''simple docstring'''
super().__init__()
__snake_case : List[str] = nn.Linear(a_ , dim_out * 2 )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(a_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.proj(a_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(a_ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ ):
'''simple docstring'''
super().__init__()
__snake_case : Union[str, Any] = nn.Linear(a_ , a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.proj(a_ )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ ):
'''simple docstring'''
super().__init__()
__snake_case : Optional[Any] = nn.Embedding(a_ , a_ )
__snake_case : Optional[int] = nn.SiLU()
__snake_case : Dict = nn.Linear(a_ , embedding_dim * 2 )
__snake_case : int = nn.LayerNorm(a_ , elementwise_affine=a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.linear(self.silu(self.emb(a_ ) ) )
__snake_case , __snake_case : Union[str, Any] = torch.chunk(a_ , 2 )
__snake_case : Dict = self.norm(a_ ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ ):
'''simple docstring'''
super().__init__()
__snake_case : Optional[int] = CombinedTimestepLabelEmbeddings(a_ , a_ )
__snake_case : Union[str, Any] = nn.SiLU()
__snake_case : Union[str, Any] = nn.Linear(a_ , 6 * embedding_dim , bias=a_ )
__snake_case : List[Any] = nn.LayerNorm(a_ , elementwise_affine=a_ , eps=1E-6 )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_=None ):
'''simple docstring'''
__snake_case : List[str] = self.linear(self.silu(self.emb(a_ , a_ , hidden_dtype=a_ ) ) )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = emb.chunk(6 , dim=1 )
__snake_case : str = self.norm(a_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = None , a_ = 1E-5 ):
'''simple docstring'''
super().__init__()
__snake_case : List[str] = num_groups
__snake_case : int = eps
if act_fn is None:
__snake_case : List[str] = None
else:
__snake_case : int = get_activation(a_ )
__snake_case : Any = nn.Linear(a_ , out_dim * 2 )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
if self.act:
__snake_case : Dict = self.act(a_ )
__snake_case : List[str] = self.linear(a_ )
__snake_case : int = emb[:, :, None, None]
__snake_case , __snake_case : Union[str, Any] = emb.chunk(2 , dim=1 )
__snake_case : Any = F.group_norm(a_ , self.num_groups , eps=self.eps )
__snake_case : Any = x * (1 + scale) + shift
return x
| 24 |
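# A minimal sketch of the chunked feed-forward trick used in the block
# above (pure torch, no diffusers imports): because the MLP acts on each
# token independently, applying it to slices along the token dimension and
# concatenating is exact, while lowering peak activation memory.
import torch
from torch import nn

ff = nn.Sequential(nn.Linear(32, 128), nn.GELU(), nn.Linear(128, 32))
x = torch.randn(2, 64, 32)  # (batch, tokens, dim)
chunked = torch.cat([ff(part) for part in x.chunk(4, dim=1)], dim=1)
assert torch.allclose(chunked, ff(x), atol=1e-6)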
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 24 | 1 |
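# Usage sketch for the tokenizer above (needs the real
# facebook/mbart-large-en-ro checkpoint): the language codes chosen here
# drive set_src_lang_special_tokens, so source ids end with </s> en_XX.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist()[-2:]))  # ['</s>', 'en_XX']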
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself,
    e.g. 5 -> 25 and 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
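# Spot checks for is_automorphic_number above: 5^2 = 25 and 76^2 = 5776
# end in the number itself, while 7^2 = 49 does not.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert not is_automorphic_number(7)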
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=True)
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=True)
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : List[Any] = hans_processors[task]()
__snake_case : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Tuple = label_list[2], label_list[1]
__snake_case : Dict = label_list
__snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
__snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case : Union[str, Any] = tf.data.Dataset.from_generator(
a_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
__snake_case : Tuple = '''%s-%s''' % (set_type, line[0])
__snake_case : Dict = line[5]
__snake_case : int = line[6]
__snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case : List[Any] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )}
__snake_case : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , )
__snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0
__snake_case : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
| 24 | 1 |
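# The same data is on the Hugging Face hub, so a modern alternative to the
# file-based processor above is simply (a sketch assuming the hub entry):
from datasets import load_dataset

hans = load_dataset("hans", split="validation")
print(hans[0]["premise"])
print(hans[0]["hypothesis"])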
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase ( ) ->str:
"""simple docstring"""
__snake_case : Tuple = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
        '''training_script''' , type=str , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
return parser.parse_args()
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : Dict = parse_args()
# Import training_script as a module.
__snake_case : Tuple = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__snake_case : int = script_fpath.stem
__snake_case : str = importlib.import_module(_snake_case )
# Patch sys.argv
__snake_case : int = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 24 |
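# Hypothetical invocation of the launcher above (script name and training
# flags are placeholders):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
# Everything after the positional script path is forwarded untouched via
# the REMAINDER nargs, and xmp.spawn forks one process per TPU core.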
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__(self , a_=36_000 , a_=1_280 , a_=1_024 , a_=8_192 , a_=4_096 , a_=128 , a_=10 , a_=0 , a_=16 , a_=16 , a_=128 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=35_998 , a_=35_995 , a_=35_999 , **a_ , ):
'''simple docstring'''
__snake_case : Any = vocab_size
__snake_case : str = max_position_embeddings
__snake_case : Any = d_model
__snake_case : List[str] = d_ff
__snake_case : Dict = d_ext
__snake_case : Optional[Any] = d_spout
__snake_case : int = num_switch_layers
__snake_case : List[Any] = num_ext_layers
__snake_case : Any = num_switch_layers + num_ext_layers
__snake_case : Optional[int] = num_heads
__snake_case : Tuple = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : Dict = dropout_rate
__snake_case : Optional[Any] = layer_norm_epsilon
__snake_case : Dict = router_bias
__snake_case : str = router_jitter_noise
__snake_case : List[str] = router_dtype
__snake_case : Union[str, Any] = router_ignore_padding_tokens
__snake_case : List[str] = output_hidden_states
__snake_case : Optional[Any] = output_attentions
__snake_case : Any = initializer_factor
__snake_case : int = output_router_logits
__snake_case : Union[str, Any] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
| 24 | 1 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation: a pixel turns on when the kernel overlaps any
    on-pixel in its neighborhood."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (this offset centers a 3x3 kernel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE : str = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE : int = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE : List[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE : Tuple = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 24 |
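# Tiny check of dilation above: a single on-pixel grows into the plus
# shape of the structuring element.
single = np.zeros((5, 5))
single[2, 2] = 1
grown = dilation(single, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
assert grown[2, 2] == 1 and grown[1, 2] == 1 and grown[2, 1] == 1 and grown[1, 1] == 0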
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : str = line.split()
__snake_case : Union[str, Any] = line_number
__snake_case : Dict = words[0]
__snake_case : str = value
return result
def set_recursively( key , value , full_name , weight_type , hf_pointer ):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    """simple docstring"""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        full_key = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '''.'''.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if '''lm_head''' in full_key else value[0]
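# Adapter parameter names in the fairseq checkpoint mapped to their submodule parameter paths.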
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
    """simple docstring"""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split('''.''' )[-2]
                mapped_key = mapped_key.replace('''*''' , layer_index )
            if "weight_g" in name:
                weight_type = '''weight_g'''
            elif "weight_v" in name:
                weight_type = '''weight_v'''
            elif "bias" in name:
                weight_type = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = '''weight'''
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(mapped_key , value , name , weight_type , hf_model )
            return is_used
    return is_used
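# Walks the full fairseq state dict, sending conv feature-extractor weights and transformer
# weights down separate paths.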
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
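# Conv weights are named "conv_layers.<layer_id>.<type_id>...": type_id 0 is the convolution
# itself, type_id 2 its layer norm.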
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
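# Entry point: builds the config and model (sequence classification, CTC, or pretraining),
# loads the fairseq checkpoint, and copies the weights across.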
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 24 | 1 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(r"""([a-z\d])([A-Z])""")
_single_underscore_re = re.compile(r"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(r"""(_{2,})""")
_split_re = r"""^\w+(\.\w+)*$"""
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"""<>:/\|?*"""
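# Example conversions: camelcase_to_snakecase("SquadV2") -> "squad_v2",
# snakecase_to_camelcase("squad_v2") -> "SquadV2".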
def camelcase_to_snakecase( name ):
    """simple docstring"""
    name = _uppercase_uppercase_re.sub(r'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(r'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase( name ):
    """simple docstring"""
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def filename_prefix_for_name( name ):
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ):
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re , split ):
        raise ValueError(f"""Split name should match '{_split_re}' but got '{split}'.""" )
    return f"""{filename_prefix_for_name(name )}-{split}"""
def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(data_dir , prefix )
    return f"""{filepath}*"""
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
| 24 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
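# Placeholder that raises an informative error at use time when the transformers, torch, or
# note_seq backends are not installed.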
class _UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 24 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ ='albert'
    def __init__(self , vocab_size=3_00_00 , embedding_size=1_28 , hidden_size=40_96 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_63_84 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs(self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
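# Holds the hyperparameters used to build MobileViTImageProcessor instances and test inputs below.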
class MobileViTImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest ( ImageProcessingSavingTestMixin, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
    def setUp(self ):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
@property
    def image_processor_dict(self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 24 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
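# Fast tests run on CPU with tiny dummy UNet/VAE/CLIP components; slow GPU integration tests
# against real checkpoints follow below.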
class StableDiffusionUpscalePipelineFastTests ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self ):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
    def dummy_cond_unet_upscale(self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
    def dummy_vae(self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self ):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        return CLIPTextModel(config )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case : str = self.dummy_cond_unet_upscale
__snake_case : Dict = DDPMScheduler()
__snake_case : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__snake_case : int = self.dummy_vae
__snake_case : List[str] = self.dummy_text_encoder
__snake_case : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : List[str] = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__snake_case : Optional[int] = StableDiffusionUpscalePipeline(
unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=3_50 , )
__snake_case : Dict = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__snake_case : Dict = '''A painting of a squirrel eating a burger'''
__snake_case : str = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case : List[Any] = sd_pipe(
[prompt] , image=a_ , generator=a_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__snake_case : Tuple = output.images
__snake_case : int = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case : Optional[Any] = sd_pipe(
[prompt] , image=a_ , generator=a_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=a_ , )[0]
__snake_case : Any = image[0, -3:, -3:, -1]
__snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
__snake_case : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__snake_case : int = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case : List[str] = self.dummy_cond_unet_upscale
__snake_case : Union[str, Any] = DDPMScheduler()
__snake_case : Optional[int] = DDIMScheduler(prediction_type='''v_prediction''' )
__snake_case : List[Any] = self.dummy_vae
__snake_case : List[Any] = self.dummy_text_encoder
__snake_case : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : List[str] = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__snake_case : Optional[Any] = StableDiffusionUpscalePipeline(
unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=3_50 , )
__snake_case : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__snake_case : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__snake_case : int = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__snake_case : Optional[int] = output.images
assert image.shape[0] == 2
__snake_case : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case : Dict = sd_pipe(
[prompt] , image=a_ , generator=a_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__snake_case : List[str] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.dummy_cond_unet_upscale
__snake_case : str = DDPMScheduler()
__snake_case : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__snake_case : int = self.dummy_vae
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : List[str] = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__snake_case : int = unet.half()
__snake_case : Optional[Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__snake_case : Optional[int] = StableDiffusionUpscalePipeline(
unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=3_50 , )
__snake_case : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__snake_case : List[Any] = '''A painting of a squirrel eating a burger'''
__snake_case : Any = torch.manual_seed(0 )
__snake_case : Optional[Any] = sd_pipe(
[prompt] , image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , ).images
__snake_case : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__snake_case : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__snake_case : int = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case : Tuple = StableDiffusionUpscalePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__snake_case : int = '''a cat sitting on a park bench'''
__snake_case : Optional[int] = torch.manual_seed(0 )
__snake_case : Optional[int] = pipe(
prompt=a_ , image=a_ , generator=a_ , output_type='''np''' , )
__snake_case : str = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__snake_case : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__snake_case : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case : Any = StableDiffusionUpscalePipeline.from_pretrained(
a_ , torch_dtype=torch.floataa , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__snake_case : Optional[Any] = '''a cat sitting on a park bench'''
__snake_case : Any = torch.manual_seed(0 )
__snake_case : List[Any] = pipe(
prompt=a_ , image=a_ , generator=a_ , output_type='''np''' , )
__snake_case : Dict = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__snake_case : Optional[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case : str = StableDiffusionUpscalePipeline.from_pretrained(
a_ , torch_dtype=torch.floataa , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__snake_case : int = '''a cat sitting on a park bench'''
__snake_case : Tuple = torch.manual_seed(0 )
__snake_case : int = pipe(
prompt=a_ , image=a_ , generator=a_ , num_inference_steps=5 , output_type='''np''' , )
__snake_case : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 24 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
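# Builds a tiny linear model plus optimizer, scheduler, and dataloaders for exercising
# Accelerator.prepare.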
def create_components( ):
    """simple docstring"""
    model = torch.nn.Linear(2 , 4 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl
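# Scalar fingerprint of the model weights, used below to verify save/load round-trips.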
def get_signature( model ):
    """simple docstring"""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights( model ):
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(state )
class AcceleratorTester ( AccelerateTestCase ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError ):
            accelerator = Accelerator(cpu=True )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args , **kwargs ):
            pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # make sure loaded weights match
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
# saving hook
        def save_config(models , weights , output_dir ):
            config = {'''class_name''': models[0].__class__.__name__}
            with open(os.path.join(output_dir , '''data.json''' ) , '''w''' ) as f:
                json.dump(config , f )
        # loading hook
        def load_config(models , input_dir ):
            with open(os.path.join(input_dir , '''data.json''' ) , '''r''' ) as f:
                config = json.load(f )
            models[0].class_name = config['''class_name''']
        save_hook = accelerator.register_save_state_pre_hook(save_config )
        load_hook = accelerator.register_load_state_pre_hook(load_config )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # random class name to verify correct one is loaded
            model.class_name = '''random'''
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks removed
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # random class name to verify correct one is loaded
            model.class_name = '''random'''
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        dummy_obj = None
        # This should work
        model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertEqual(
            getattr(dummy_obj , '''_is_accelerate_prepared''' , False ) , False , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
        self.assertEqual(
            getattr(model , '''_is_accelerate_prepared''' , False ) , True , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(optimizer , '''_is_accelerate_prepared''' , False ) , True , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(scheduler , '''_is_accelerate_prepared''' , False ) , True , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(train_dl , '''_is_accelerate_prepared''' , False ) , True , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(valid_dl , '''_is_accelerate_prepared''' , False ) , True , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_abit=True , device_map={'''''': 0} , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map['''lm_head'''] = '''cpu'''
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , device_map=device_map , load_in_abit=True , llm_inta_enable_fpaa_cpu_offload=True )
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
        PartialState._shared_state = {'''distributed_type''': DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map['''lm_head'''] = 1
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_abit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        device_map = infer_auto_device_map(model )
        device_map['''lm_head'''] = 1
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_abit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        model = torch.nn.Linear(10 , 10 )
        sgd = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        _ = accelerator.prepare(sgd )
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
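# Dense matrix with overloaded +, -, * and transpose, plus a Sherman-Morrison rank-1 inverse update.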
class Matrix :
'''simple docstring'''
    def __init__(self , row , column , default_value = 0 ):
        '''simple docstring'''
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__(self ):
        '''simple docstring'''
        s = f"""Matrix consists of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f"""%{max_element_length}s"""
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = '''['''
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__(self ):
'''simple docstring'''
return str(self )
    def validate_indicies(self , loc ):
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self , loc ):
        '''simple docstring'''
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__(self , loc , value ):
        '''simple docstring'''
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__(self , another ):
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self ):
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self , another ):
        '''simple docstring'''
        return self + (-another)
    def __mul__(self , another ):
        '''simple docstring'''
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"""Unsupported type given for another ({type(another )})"""
            raise TypeError(msg )
    def transpose(self ):
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
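    # Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u);
    # here `self` plays the role of A^(-1).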
    def sherman_morrison(self , u , v ):
        '''simple docstring'''
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa( ) ->None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f"""a^(-1) is {ainv}""" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"""u is {u}""" )
        print(f"""v is {v}""" )
        print(f"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}""" )
    def test_doctest( ) ->None:
        """simple docstring"""
        import doctest
        doctest.testmod()
    testa()
| 24 |
"""simple docstring"""
def twos_complement( number : int ) ->str:
    """simple docstring"""
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
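# Token ids typically suppressed at generation time (non-speech tokens), one list per
# checkpoint family (multilingual and English-only).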
SCREAMING_SNAKE_CASE : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
SCREAMING_SNAKE_CASE : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ ='whisper'
lowerCamelCase__ =['past_key_values']
lowerCamelCase__ ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self , vocab_size=5_18_65 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=15_36 , encoder_ffn_dim=15_36 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=5_02_57 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=2_56 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=15_00 , max_target_positions=4_48 , pad_token_id=5_02_56 , bos_token_id=5_02_56 , eos_token_id=5_02_56 , suppress_tokens=None , begin_suppress_tokens=[2_20, 5_02_56] , use_weighted_layer_sum=False , classifier_proj_size=2_56 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
    def inputs(self ):
        '''simple docstring'''
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
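    # Dummy ONNX-export inputs: audio features come from the feature extractor, decoder input
    # ids from the tokenizer.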
    def generate_dummy_inputs(self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 2_20_50 , time_duration = 5.0 , frequency = 2_20 , ):
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs
@property
    def atol_for_validation(self ):
        '''simple docstring'''
        return 1E-3
| 24 |
"""simple docstring"""
def solution( ) ->int:
    """simple docstring"""
    return [
        a * b * (1_000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
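# Recursive binary search over a sorted list, halving the slice each call,
# e.g. binary_search([1, 3, 5, 7], 5) is True.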
def binary_search( a_list : list[int] , item : int ) ->bool:
    """simple docstring"""
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    sequence = [int(item.strip()) for item in user_input.split(""",""")]
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    not_str = """""" if binary_search(sequence, target) else """not """
print(F'{target} was {not_str}found in {sequence}')
| 24 |
"""simple docstring"""
def solution( n : int = 100 ) ->int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
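# Human-readable language names mapped to the FLORES-200 codes used by NLLB translation
# checkpoints (e.g. "English" -> "eng_Latn").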
SCREAMING_SNAKE_CASE : Any = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='facebook/nllb-200-distilled-600M'
lowerCamelCase__ =(
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
lowerCamelCase__ ='translator'
lowerCamelCase__ =AutoTokenizer
lowerCamelCase__ =AutoModelForSeqaSeqLM
lowerCamelCase__ =LANGUAGE_CODES
lowerCamelCase__ =['text', 'text', 'text']
lowerCamelCase__ =['text']
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
__snake_case : Dict = self.lang_to_code[src_lang]
__snake_case : str = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
a_ , return_tensors='''pt''' , src_lang=a_ , tgt_lang=a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.model.generate(**a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=a_ )
| 24 |
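# Hedged usage sketch for the translation tool above: `load_tool` is part of the
# transformers Agents API, and the language arguments must be plain-English keys
# of LANGUAGE_CODES (e.g. 'English' maps to 'eng_Latn'). The checkpoint is
# downloaded on first use.
from transformers import load_tool

translator = load_tool("translation")
print(translator("Hello, how are you?", src_lang="English", tgt_lang="French"))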
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
            # Infer the features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
| 24 | 1 |
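# Hedged sketch of how the builder above is normally reached: `load_dataset`
# routes the "parquet" packaged module through this ArrowBasedBuilder. The file
# path below is illustrative.
from datasets import load_dataset

ds = load_dataset("parquet", data_files={"train": "data/train.parquet"}, split="train")
print(ds.features)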
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='vit_mae'
def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-12 , a_=2_24 , a_=16 , a_=3 , a_=True , a_=16 , a_=5_12 , a_=8 , a_=20_48 , a_=0.75 , a_=False , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : str = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : List[Any] = hidden_act
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Any = initializer_range
__snake_case : List[str] = layer_norm_eps
__snake_case : List[Any] = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : List[Any] = num_channels
__snake_case : List[str] = qkv_bias
__snake_case : str = decoder_num_attention_heads
__snake_case : Tuple = decoder_hidden_size
__snake_case : str = decoder_num_hidden_layers
__snake_case : int = decoder_intermediate_size
__snake_case : Dict = mask_ratio
__snake_case : List[Any] = norm_pix_loss
| 24 |
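# Minimal sketch instantiating the configuration above; the defaults mirror the
# facebook/vit-mae-base checkpoint, and the model here is randomly initialized,
# not pretrained.
from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=False)
model = ViTMAEModel(config)
print(config.hidden_size, config.decoder_hidden_size)  # 768 512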
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__snake_case : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = '''sshleifer/tiny-gpt2'''
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''sgugger/tiny-distilbert-classification'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , )
__snake_case : Optional[Any] = TensorFlowBenchmark(a_ )
__snake_case : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Any = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ )
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Dict = TensorFlowBenchmark(a_ , [config] )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : int = TensorFlowBenchmark(a_ )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
__snake_case : Tuple = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] )
__snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(a_ )
benchmark.run()
self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(a_ ):
self.assertTrue(hasattr(a_ , '''sequential''' ) )
self.assertTrue(hasattr(a_ , '''cumulative''' ) )
self.assertTrue(hasattr(a_ , '''current''' ) )
self.assertTrue(hasattr(a_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ )
__snake_case : Optional[int] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
| 24 | 1 |
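# Hedged sketch of the benchmark API exercised by the tests above; it requires
# TensorFlow and downloads the tiny checkpoint on first run.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)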
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE : int = """src/transformers"""
SCREAMING_SNAKE_CASE : Optional[int] = """docs/source/en/tasks"""
def lowercase ( _snake_case : int , _snake_case : str , _snake_case : Tuple ) ->List[Any]:
"""simple docstring"""
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__snake_case : str = f.readlines()
# Find the start prompt.
__snake_case : Optional[int] = 0
while not lines[start_index].startswith(_snake_case ):
start_index += 1
start_index += 1
__snake_case : List[Any] = start_index
while not lines[end_index].startswith(_snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE : int = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def lowercase ( _snake_case : Any ) ->int:
"""simple docstring"""
__snake_case : str = TASK_GUIDE_TO_MODELS[task_guide]
__snake_case : Optional[int] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_snake_case , set() )
__snake_case : str = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def lowercase ( _snake_case : Dict , _snake_case : int=False ) ->int:
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = _find_text_in_file(
filename=os.path.join(_snake_case , _snake_case ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__snake_case : int = get_model_list_for_task(_snake_case )
if current_list != new_list:
if overwrite:
with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
''' to fix this.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 24 |
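# Self-contained illustration of the prompt-delimited extraction the helper in
# the row above performs on the task-guide files (the list below is illustrative
# data, not a real guide).
lines = ["intro\n", "<!--start-->\n", "model list\n", "<!--end-->\n", "outro\n"]
start_index = next(i for i, line in enumerate(lines) if line.startswith("<!--start-->")) + 1
end_index = next(i for i, line in enumerate(lines) if line.startswith("<!--end-->"))
print("".join(lines[start_index:end_index]))  # -> "model list\n"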
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def lowercase ( ) ->str:
"""simple docstring"""
global _logger
__snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = lock_file
return None
def __str__(self ):
'''simple docstring'''
__snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.lock.release()
return None
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ )
# The path to the lock file.
__snake_case : str = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
        # This file lock is only NOT None if the object currently holds the
        # lock.
__snake_case : Dict = None
# The default timeout value.
__snake_case : List[Any] = timeout
# We use this lock primarily for the lock counter.
__snake_case : Tuple = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released when this value is 0 again.
__snake_case : Optional[Any] = 0
return None
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = float(a_ )
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ):
'''simple docstring'''
if timeout is None:
__snake_case : List[str] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__snake_case : Optional[int] = id(self )
__snake_case : str = self._lock_file
__snake_case : Optional[int] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(a_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__snake_case : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__snake_case : Tuple = id(self )
__snake_case : str = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__snake_case : Dict = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
def __del__(self ):
'''simple docstring'''
self.release(force=a_ )
return None
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = os.path.basename(a_ )
if len(a_ ) > max_length and max_length > 0:
__snake_case : List[Any] = os.path.dirname(a_ )
__snake_case : Any = str(hash(a_ ) )
__snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(a_ , a_ )
else:
return path
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
__snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__snake_case : Any = os.open(self._lock_file , a_ )
except OSError:
pass
else:
try:
msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(a_ )
else:
__snake_case : Dict = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Dict = None
msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 )
os.close(a_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__snake_case : List[str] = os.open(self._lock_file , a_ )
try:
fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(a_ )
else:
__snake_case : Optional[int] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Tuple = None
fcntl.flock(a_ , fcntl.LOCK_UN )
os.close(a_ )
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__snake_case : Tuple = os.open(self._lock_file , a_ )
except OSError:
pass
else:
__snake_case : List[Any] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
os.close(self._lock_file_fd )
__snake_case : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
SCREAMING_SNAKE_CASE : Dict = None
if msvcrt:
SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock
elif fcntl:
SCREAMING_SNAKE_CASE : List[str] = UnixFileLock
else:
SCREAMING_SNAKE_CASE : str = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 24 | 1 |
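# Hedged usage sketch of the py-filelock API the row above vendors. The row's
# method names are transformed, so this targets the upstream `filelock` package
# (`pip install filelock`); the lock file name is illustrative.
from filelock import FileLock

lock = FileLock("example.txt.lock", timeout=5)
with lock:
    with lock:  # re-entrant: nested acquisition only bumps an internal counter
        print("is_locked:", lock.is_locked)  # True
print("is_locked:", lock.is_locked)  # False once the counter returns to zero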
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def lowercase ( _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Dict ) ->Tuple:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowercase ( _snake_case : np.ndarray , _snake_case : Optional[str] , _snake_case : Optional[str] = None ) ->List[Any]:
"""simple docstring"""
__snake_case : List[Any] = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
__snake_case : List[Any] = to_pil_image(_snake_case )
__snake_case , __snake_case : str = pil_image.size
__snake_case : int = pytesseract.image_to_data(_snake_case , lang=_snake_case , output_type='''dict''' , config=_snake_case )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
__snake_case : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
__snake_case : List[str] = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
__snake_case : Any = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
__snake_case : Optional[int] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
__snake_case : Dict = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
__snake_case : Optional[Any] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__snake_case : List[Any] = []
for x, y, w, h in zip(_snake_case , _snake_case , _snake_case , _snake_case ):
__snake_case : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
__snake_case : Optional[int] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case , _snake_case , _snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['pixel_values']
def __init__(self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = None , a_ = "" , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Optional[int] = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
__snake_case : Dict = get_size_dict(a_ )
__snake_case : Any = do_resize
__snake_case : List[Any] = size
__snake_case : int = resample
__snake_case : Optional[int] = apply_ocr
__snake_case : Dict = ocr_lang
__snake_case : str = tesseract_config
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ = PILImageResampling.BILINEAR , a_ = None , **a_ , ):
'''simple docstring'''
__snake_case : Any = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__snake_case : int = (size['''height'''], size['''width'''])
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ):
'''simple docstring'''
__snake_case : int = do_resize if do_resize is not None else self.do_resize
__snake_case : Any = size if size is not None else self.size
__snake_case : Any = get_size_dict(a_ )
__snake_case : Dict = resample if resample is not None else self.resample
__snake_case : Optional[int] = apply_ocr if apply_ocr is not None else self.apply_ocr
__snake_case : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
__snake_case : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
__snake_case : Tuple = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
__snake_case : Any = [to_numpy_array(a_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
__snake_case : Union[str, Any] = []
__snake_case : Optional[Any] = []
for image in images:
__snake_case , __snake_case : Optional[Any] = apply_tesseract(a_ , a_ , a_ )
words_batch.append(a_ )
boxes_batch.append(a_ )
if do_resize:
__snake_case : List[Any] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__snake_case : List[Any] = [flip_channel_order(a_ ) for image in images]
__snake_case : Union[str, Any] = [to_channel_dimension_format(a_ , a_ ) for image in images]
__snake_case : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=a_ )
if apply_ocr:
__snake_case : Optional[Any] = words_batch
__snake_case : int = boxes_batch
return data
| 24 |
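# Hedged usage sketch: the processor above mirrors LayoutLMv2ImageProcessor, so
# the real transformers class is used here for a runnable call (apply_ocr=True
# would additionally require pytesseract to be installed).
from PIL import Image
from transformers import LayoutLMv2ImageProcessor

processor = LayoutLMv2ImageProcessor(apply_ocr=False)
image = Image.new("RGB", (640, 480), color="white")  # illustrative blank page
encoding = processor(images=image, return_tensors="np")
print(encoding["pixel_values"].shape)  # -> (1, 3, 224, 224)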
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Dict = seq_length
__snake_case : List[str] = is_training
__snake_case : List[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = scope
__snake_case : Any = range_bbox
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[str] = bbox[i, j, 3]
__snake_case : Any = bbox[i, j, 1]
__snake_case : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : List[str] = bbox[i, j, 2]
__snake_case : Union[str, Any] = bbox[i, j, 0]
__snake_case : Dict = t
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[str] = None
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ )
__snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ )
__snake_case : List[str] = model(a_ , bbox=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : List[str] = LiltForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Dict = config_and_inputs
__snake_case : Any = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModelTester(self )
__snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Dict = type
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = LiltModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ )
__snake_case : Dict = torch.tensor([[1, 2]] , device=a_ )
__snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ )
__snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] )
__snake_case : str = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , )
self.assertTrue(outputs.last_hidden_state.shape , a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
| 24 | 1 |
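# Hedged sketch mirroring the integration test above: LiLT consumes token ids
# plus one (x0, y0, x1, y1) bounding box per token. Requires the pretrained
# checkpoint named in the test.
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])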
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : list[int] , _snake_case : int ) ->list[list[int]]:
"""simple docstring"""
__snake_case : list[list[int]] = []
__snake_case : list[int] = []
__snake_case : Any = 0
__snake_case : List[str] = sum(_snake_case )
create_state_space_tree(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return result
def lowercase ( _snake_case : list[int] , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , _snake_case : int , ) ->None:
"""simple docstring"""
if sum(_snake_case ) > max_sum or (remaining_nums_sum + sum(_snake_case )) < max_sum:
return
if sum(_snake_case ) == max_sum:
result.append(_snake_case )
return
for index in range(_snake_case , len(_snake_case ) ):
create_state_space_tree(
_snake_case , _snake_case , index + 1 , [*path, nums[index]] , _snake_case , remaining_nums_sum - nums[index] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = [3, 34, 4, 12, 5, 2]
SCREAMING_SNAKE_CASE : Dict = 9
SCREAMING_SNAKE_CASE : Optional[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(a_ , attention_mask=a_ )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
| 24 | 1 |
"""simple docstring"""
def lowercase ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
__snake_case : Optional[int] = set()
# Replace all the whitespace in our sentence
__snake_case : str = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_snake_case ) == 26
def lowercase ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
__snake_case : Tuple = [False] * 26
for char in input_str:
if char.islower():
__snake_case : Any = True
elif char.isupper():
__snake_case : Dict = True
return all(_snake_case )
def lowercase ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def lowercase ( ) ->None:
"""simple docstring"""
from timeit import timeit
__snake_case : List[str] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=_snake_case ) )
print(timeit('''is_pangram_faster()''' , setup=_snake_case ) )
print(timeit('''is_pangram_fastest()''' , setup=_snake_case ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 |
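A minimal, self-contained sketch of the set-based check in the row above, with illustrative names (the three variants all share the obfuscated name `lowercase` and therefore shadow one another, so readable names are used here):

import string

def is_pangram(sentence: str) -> bool:
    # A pangram uses every letter of the English alphabet at least once.
    letters = {ch for ch in sentence.lower() if ch.isalpha()}
    return letters >= set(string.ascii_lowercase)

assert is_pangram("The quick brown fox jumps over the lazy dog")
assert not is_pangram("Hello world")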
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]:
"""simple docstring"""
def get_masked_lm_array(_snake_case : str ):
__snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : str = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Any = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_array(_snake_case : str ):
__snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_layer_array(_snake_case : int , _snake_case : str ):
__snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ):
__snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case )
__snake_case : int = array.reshape(_snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
print(f"""Loading model based on config from {config_path}...""" )
__snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case )
__snake_case : Dict = BertForMaskedLM(_snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 1 |
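The helpers in the conversion script above transpose every array whose name contains "kernel". A short sketch of why, using only numpy and torch: TensorFlow stores dense kernels as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features), so the kernel must be transposed before copying.

import numpy as np
import torch

tf_kernel = np.random.randn(768, 3072).astype(np.float32)  # TF layout: (in, out)
linear = torch.nn.Linear(768, 3072)                         # weight layout: (out, in)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tf_kernel.transpose()))
assert tuple(linear.weight.shape) == (3072, 768)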
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='poolformer'
def __init__(self , a_=3 , a_=16 , a_=16 , a_=3 , a_=4.0 , a_=[2, 2, 6, 2] , a_=[64, 1_28, 3_20, 5_12] , a_=[7, 3, 3, 3] , a_=[4, 2, 2, 2] , a_=[2, 1, 1, 1] , a_=4 , a_=0.0 , a_="gelu" , a_=True , a_=1E-5 , a_=0.02 , **a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = num_channels
__snake_case : str = patch_size
__snake_case : int = stride
__snake_case : Optional[int] = padding
__snake_case : Any = pool_size
__snake_case : int = hidden_sizes
__snake_case : Optional[Any] = mlp_ratio
__snake_case : Any = depths
__snake_case : Optional[int] = patch_sizes
__snake_case : Optional[Any] = strides
__snake_case : Optional[int] = num_encoder_blocks
__snake_case : List[str] = drop_path_rate
__snake_case : Dict = hidden_act
__snake_case : Optional[Any] = use_layer_scale
__snake_case : int = layer_scale_init_value
__snake_case : int = initializer_range
super().__init__(**a_ )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 2E-3
| 24 |
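A hedged usage sketch of the configuration above through the public transformers class, assuming the defaults shown in the `__init__`:

from transformers import PoolFormerConfig

config = PoolFormerConfig()   # defaults mirror the __init__ in the row above
print(config.hidden_sizes)    # [64, 128, 320, 512]
print(config.depths)          # [2, 2, 6, 2]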
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ):
'''simple docstring'''
super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ )
__snake_case : Union[str, Any] = Sql(
cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = None
__snake_case : Dict = None
__snake_case : Dict = None
__snake_case : List[str] = None
self.builder.download_and_prepare(
download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , )
# Build dataset for splits
__snake_case : Any = self.builder.as_dataset(
split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__snake_case : List[str] = dataset
__snake_case : Tuple = name
__snake_case : Optional[int] = con
__snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case : Dict = num_proc
__snake_case : Dict = to_sql_kwargs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ )
__snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ )
__snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ )
__snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case : Optional[Any] = args
__snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__snake_case : Dict = query_table(
table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : Tuple = batch.to_pandas()
__snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ )
return num_rows or len(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 24 | 1 |
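A hedged usage sketch of the reader/writer pair above via the public `datasets` API (`Dataset.to_sql` / `Dataset.from_sql`); the table name and SQLite URI are illustrative:

from datasets import Dataset

ds = Dataset.from_dict({"idx": [0, 1, 2], "text": ["a", "b", "c"]})
ds.to_sql("demo_table", "sqlite:///demo.db")   # written in batches under the hood
ds_back = Dataset.from_sql("SELECT * FROM demo_table", "sqlite:///demo.db")
print(ds_back[0])                              # {'idx': 0, 'text': 'a'}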
"""simple docstring"""
def lowercase ( _snake_case : str , _snake_case : str ) ->list:
"""simple docstring"""
__snake_case : Any = len(_snake_case )
__snake_case : Tuple = []
for i in range(len(_snake_case ) - pat_len + 1 ):
__snake_case : Optional[Any] = True
for j in range(_snake_case ):
if s[i + j] != pattern[j]:
__snake_case : Tuple = False
break
if match_found:
position.append(_snake_case )
return position
if __name__ == "__main__":
    assert lowercase("""ABCDEFG""", """DE""") == [3]
    print(lowercase("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 24 |
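An equivalent, more idiomatic sketch of the naive scan above, using `str.startswith` with an offset instead of an inner character loop (the name is illustrative):

def naive_pattern_search(text: str, pattern: str) -> list:
    # startswith(pattern, i) compares pattern against text[i:i + len(pattern)]
    # without building an intermediate slice.
    return [
        i
        for i in range(len(text) - len(pattern) + 1)
        if text.startswith(pattern, i)
    ]

assert naive_pattern_search("ABCDEFG", "DE") == [3]
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]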
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='lxmert'
lowerCamelCase__ ={}
def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : List[Any] = num_attention_heads
__snake_case : int = hidden_act
__snake_case : int = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : str = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = num_qa_labels
__snake_case : int = num_object_labels
__snake_case : Optional[Any] = num_attr_labels
__snake_case : Union[str, Any] = l_layers
__snake_case : Optional[int] = x_layers
__snake_case : Optional[int] = r_layers
__snake_case : Tuple = visual_feat_dim
__snake_case : Optional[int] = visual_pos_dim
__snake_case : Dict = visual_loss_normalizer
__snake_case : str = task_matched
__snake_case : Optional[Any] = task_mask_lm
__snake_case : List[str] = task_obj_predict
__snake_case : Optional[Any] = task_qa
__snake_case : Any = visual_obj_loss
__snake_case : int = visual_attr_loss
__snake_case : List[Any] = visual_feat_loss
__snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**a_ )
| 24 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 24 |
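The row above depends on diffusers' `DummyObject`/`requires_backends` machinery. A minimal sketch of the same guard pattern using only the standard library; the pipeline class name here is hypothetical:

class _RequiresBackends(type):
    """Metaclass for placeholder classes that fail loudly when instantiated."""

    def __call__(cls, *args, **kwargs):
        missing = ", ".join(cls._backends)
        raise ImportError(f"{cls.__name__} requires {missing}, which are not installed.")

class HypotheticalNoteSeqPipeline(metaclass=_RequiresBackends):
    _backends = ["transformers", "torch", "note_seq"]

try:
    HypotheticalNoteSeqPipeline()
except ImportError as err:
    print(err)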
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__snake_case : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__snake_case : List[str] = s - 2 * j
break
return diff
| 24 | 1 |
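The function above solves the minimum subset-sum difference problem: dp[i][j] marks whether some subset of the first i elements sums to j, and the answer is the smallest s - 2j over reachable j <= s / 2. A space-optimised sketch of the same idea (illustrative name; assumes non-negative integers):

def min_partition_difference(arr: list) -> int:
    total = sum(arr)
    reachable = [False] * (total + 1)  # reachable[j]: some subset sums to j
    reachable[0] = True
    for value in arr:
        # Iterate j downwards so each element is used at most once (0/1 knapsack).
        for j in range(total, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    # The best split puts one side as close to total / 2 as possible.
    for j in range(total // 2, -1, -1):
        if reachable[j]:
            return total - 2 * j
    return total  # never reached: reachable[0] is always True

assert min_partition_difference([1, 6, 11, 5]) == 1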
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE : Tuple = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE : int = {
"""openbmb/cpm-ant-10b""": 1024,
}
def lowercase ( _snake_case : str ) ->List[str]:
"""simple docstring"""
__snake_case : List[str] = collections.OrderedDict()
with open(_snake_case , '''r''' , encoding='''utf-8''' ) as reader:
__snake_case : Optional[Any] = reader.readlines()
for index, token in enumerate(_snake_case ):
__snake_case : Union[str, Any] = token.rstrip('''\n''' )
__snake_case : Tuple = index
return vocab
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_="<unk>" , a_=2_00 ):
'''simple docstring'''
__snake_case : Dict = vocab
__snake_case : Dict = unk_token
__snake_case : Optional[Any] = max_input_chars_per_word
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Any = list(a_ )
if len(a_ ) > self.max_input_chars_per_word:
return [self.unk_token]
__snake_case : Union[str, Any] = 0
__snake_case : Union[str, Any] = []
while start < len(a_ ):
__snake_case : int = len(a_ )
__snake_case : List[Any] = None
while start < end:
__snake_case : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
__snake_case : int = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(a_ )
__snake_case : Tuple = end
return sub_tokens
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =False
def __init__(self , a_ , a_="<d>" , a_="</d>" , a_="<s>" , a_="</s>" , a_="<pad>" , a_="<unk>" , a_="</n>" , a_="</_>" , a_="left" , **a_ , ):
'''simple docstring'''
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=a_ , eod_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , unk_token=a_ , line_token=a_ , space_token=a_ , padding_side=a_ , **a_ , )
__snake_case : Dict = bod_token
__snake_case : Dict = eod_token
__snake_case : Optional[Any] = load_vocab(a_ )
__snake_case : Dict = self.encoder[space_token]
__snake_case : Union[str, Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__snake_case : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a_ : x[1] ) )
__snake_case : Optional[int] = {v: k for k, v in self.encoder.items()}
__snake_case : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : str = []
for x in jieba.cut(a_ , cut_all=a_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a_ ) )
return output_tokens
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = [i for i in token_ids if i >= 0]
__snake_case : Optional[Any] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return token in self.encoder
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return "".join(a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.decoder.get(a_ , self.unk_token )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if os.path.isdir(a_ ):
__snake_case : Any = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__snake_case : Dict = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
__snake_case : Any = 0
if " " in self.encoder:
__snake_case : Any = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__snake_case : List[str] = self.encoder['''\n''']
del self.encoder["\n"]
__snake_case : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a_ : x[1] ) )
with open(a_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
__snake_case : Tuple = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ ))
return [1] + ([0] * len(a_ ))
| 24 |
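The `WordpieceTokenizer` in the row above is a greedy longest-match segmenter: at each position it takes the longest vocabulary entry, emitting the unknown token and advancing one character when nothing matches. A standalone sketch of that loop (vocabulary and names are illustrative):

def greedy_longest_match(text: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        # Shrink the window from the right until a vocabulary entry matches.
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:   # no match at all: emit the unknown token, skip one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

assert greedy_longest_match("unhappiness", {"un", "happiness", "ness"}) == ["un", "happiness"]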
"""simple docstring"""
from collections.abc import Callable
def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float:
"""simple docstring"""
__snake_case : float = a
__snake_case : float = b
    if function(_snake_case ) == 0: # one of a or b is already a root of the function
return a
elif function(_snake_case ) == 0:
return b
elif (
function(_snake_case ) * function(_snake_case ) > 0
    ): # if neither a nor b is a root and f(a) and f(b) share the same sign,
        # then this algorithm cannot bracket a root in the interval
raise ValueError('''could not find root in given interval.''' )
else:
__snake_case : float = start + (end - start) / 2.0
    while abs(start - mid ) > 10**-7: # iterate until the bracket is narrower than 10^-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
__snake_case : List[str] = mid
else:
__snake_case : str = mid
__snake_case : str = start + (end - start) / 2.0
return mid
def lowercase ( _snake_case : float ) ->float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 24 | 1 |
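The `__main__` block in the row above still references the pre-obfuscation names `bisection` and `f`, which the renamed definitions shadow. A self-contained sketch of the same method with those names restored; the tolerance mirrors the 10^-7 used above:

from collections.abc import Callable

def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b]; f(a) and f(b) must differ in sign."""
    if function(a) * function(b) > 0:
        raise ValueError("could not find root in given interval.")
    while b - a > 10**-7:
        mid = (a + b) / 2.0
        if function(a) * function(mid) <= 0:   # sign change lies in [a, mid]
            b = mid
        else:                                  # sign change lies in [mid, b]
            a = mid
    return (a + b) / 2.0

root = bisection(lambda x: x**3 - 2 * x - 5, 1, 1000)
assert abs(root**3 - 2 * root - 5) < 1e-5      # root is approximately 2.0945514815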
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
SCREAMING_SNAKE_CASE : Optional[Any] = """
import os
"""
SCREAMING_SNAKE_CASE : Optional[Any] = """
def foo():
import os
return False
"""
SCREAMING_SNAKE_CASE : int = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
SCREAMING_SNAKE_CASE : str = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE : Tuple = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE : Optional[int] = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
SCREAMING_SNAKE_CASE : Tuple = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
SCREAMING_SNAKE_CASE : int = """
import os
try:
import bar
except:
raise ValueError()
"""
SCREAMING_SNAKE_CASE : List[Any] = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE : List[Any] = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
SCREAMING_SNAKE_CASE : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _snake_case )
def lowercase ( _snake_case : Any , _snake_case : Any ) ->str:
"""simple docstring"""
__snake_case : List[Any] = os.path.join(_snake_case , '''test_file.py''' )
with open(_snake_case , '''w''' ) as _tmp_file:
_tmp_file.write(_snake_case )
__snake_case : int = get_imports(_snake_case )
assert parsed_imports == ["os"]
| 24 |
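A sketch of how such import scanning can be done with the standard-library `ast` module: collect module roots everywhere except inside `try` blocks, which is the behaviour the parametrised test above asserts. This is an illustration of the idea, not the actual `get_imports` implementation:

import ast

def scan_imports(source: str) -> list:
    tree = ast.parse(source)
    found = set()

    def visit(node, in_try):
        if isinstance(node, ast.Try):
            in_try = True   # everything under a try statement is skipped
        if not in_try:
            if isinstance(node, ast.Import):
                found.update(alias.name.split(".")[0] for alias in node.names)
            elif isinstance(node, ast.ImportFrom) and node.module:
                found.add(node.module.split(".")[0])
        for child in ast.iter_child_nodes(node):
            visit(child, in_try)

    visit(tree, False)
    return sorted(found)

assert scan_imports("import os\ntry:\n    import bar\nexcept ImportError:\n    pass\n") == ["os"]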
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 | 1 |
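The `_LazyModule` in the row above defers the heavy `modeling_luke` import until an attribute is first touched. A minimal sketch of the same mechanism via module-level `__getattr__` (PEP 562); the package layout is hypothetical:

# hypothetical lazy_luke/__init__.py
import importlib

_ATTR_TO_MODULE = {
    "LukeConfig": "configuration_luke",
    "LukeTokenizer": "tokenization_luke",
    "LukeModel": "modeling_luke",
}

def __getattr__(name):
    # Import the owning submodule only on first access to the attribute.
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module("." + _ATTR_TO_MODULE[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")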
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
__snake_case : Dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_28,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_42,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a_ ) , x.transpose() ) )
__snake_case : Dict = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = np.random.randn(3 , 4 )
__snake_case : str = torch.tensor(a_ )
self.assertTrue(np.allclose(transpose(a_ ) , transpose(a_ ).numpy() ) )
__snake_case : Dict = np.random.randn(3 , 4 , 5 )
__snake_case : List[Any] = torch.tensor(a_ )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , transpose(a_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.random.randn(3 , 4 )
__snake_case : Optional[int] = tf.constant(a_ )
self.assertTrue(np.allclose(transpose(a_ ) , transpose(a_ ).numpy() ) )
__snake_case : int = np.random.randn(3 , 4 , 5 )
__snake_case : List[Any] = tf.constant(a_ )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , transpose(a_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = np.random.randn(3 , 4 )
__snake_case : Tuple = jnp.array(a_ )
self.assertTrue(np.allclose(transpose(a_ ) , np.asarray(transpose(a_ ) ) ) )
__snake_case : Any = np.random.randn(3 , 4 , 5 )
__snake_case : Optional[Any] = jnp.array(a_ )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , np.asarray(transpose(a_ , axes=(1, 2, 0) ) ) ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , np.reshape(a_ , (4, 3) ) ) )
__snake_case : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , np.reshape(a_ , (12, 5) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = np.random.randn(3 , 4 )
__snake_case : Union[str, Any] = torch.tensor(a_ )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , reshape(a_ , (4, 3) ).numpy() ) )
__snake_case : str = np.random.randn(3 , 4 , 5 )
__snake_case : Optional[Any] = torch.tensor(a_ )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , reshape(a_ , (12, 5) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = np.random.randn(3 , 4 )
__snake_case : str = tf.constant(a_ )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , reshape(a_ , (4, 3) ).numpy() ) )
__snake_case : Optional[int] = np.random.randn(3 , 4 , 5 )
__snake_case : List[Any] = tf.constant(a_ )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , reshape(a_ , (12, 5) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.random.randn(3 , 4 )
__snake_case : str = jnp.array(a_ )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , np.asarray(reshape(a_ , (4, 3) ) ) ) )
__snake_case : str = np.random.randn(3 , 4 , 5 )
__snake_case : str = jnp.array(a_ )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , np.asarray(reshape(a_ , (12, 5) ) ) ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a_ ) , np.squeeze(a_ ) ) )
__snake_case : str = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , np.squeeze(a_ , axis=2 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = np.random.randn(1 , 3 , 4 )
__snake_case : List[str] = torch.tensor(a_ )
self.assertTrue(np.allclose(squeeze(a_ ) , squeeze(a_ ).numpy() ) )
__snake_case : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
__snake_case : str = torch.tensor(a_ )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , squeeze(a_ , axis=2 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = np.random.randn(1 , 3 , 4 )
__snake_case : Dict = tf.constant(a_ )
self.assertTrue(np.allclose(squeeze(a_ ) , squeeze(a_ ).numpy() ) )
__snake_case : Dict = np.random.randn(1 , 4 , 1 , 5 )
__snake_case : int = tf.constant(a_ )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , squeeze(a_ , axis=2 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = np.random.randn(1 , 3 , 4 )
__snake_case : Any = jnp.array(a_ )
self.assertTrue(np.allclose(squeeze(a_ ) , np.asarray(squeeze(a_ ) ) ) )
__snake_case : Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
__snake_case : Tuple = jnp.array(a_ )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , np.asarray(squeeze(a_ , axis=2 ) ) ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , np.expand_dims(a_ , axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = np.random.randn(3 , 4 )
__snake_case : Optional[int] = torch.tensor(a_ )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , expand_dims(a_ , axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.random.randn(3 , 4 )
__snake_case : Optional[Any] = tf.constant(a_ )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , expand_dims(a_ , axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = np.random.randn(3 , 4 )
__snake_case : List[str] = jnp.array(a_ )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , np.asarray(expand_dims(a_ , axis=1 ) ) ) )
| 24 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be None.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 24 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =IFPipeline
lowerCamelCase__ =TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowerCamelCase__ =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ =PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ):
'''simple docstring'''
if str(a_ ).startswith('''mps''' ):
__snake_case : Any = torch.manual_seed(a_ )
else:
__snake_case : Optional[int] = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
__snake_case : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=a_ , tokenizer=a_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
__snake_case , __snake_case : Any = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__snake_case : List[str] = None
__snake_case : Tuple = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a_ , a_ , a_ , a_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__snake_case : Optional[Any] = IFImgaImgPipeline(**pipe_a.components )
__snake_case : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a_ , a_ , a_ , a_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__snake_case : Any = IFInpaintingPipeline(**pipe_a.components )
__snake_case : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a_ , a_ , a_ , a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
_start_torch_memory_measurement()
__snake_case : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Optional[Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , num_inference_steps=2 , generator=a_ , output_type='''np''' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__snake_case : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
__snake_case : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Union[str, Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , )
__snake_case : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__snake_case : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(a_ , a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
_start_torch_memory_measurement()
__snake_case : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : List[str] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , num_inference_steps=2 , generator=a_ , output_type='''np''' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__snake_case : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
__snake_case : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Any = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Dict = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , )
__snake_case : Optional[int] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(a_ , a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
_start_torch_memory_measurement()
__snake_case : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a_ )
__snake_case : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , num_inference_steps=2 , generator=a_ , output_type='''np''' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__snake_case : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
__snake_case : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Optional[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(a_ )
__snake_case : Tuple = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , )
__snake_case : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__snake_case : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(a_ , a_ )
def lowercase ( ) ->Optional[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 24 |
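The memory-measurement helper above resets the CUDA counters so each stage's `max_memory_allocated` reflects only that stage. The pattern in isolation, as a small sketch that requires a CUDA device:

import torch

def measure_peak_bytes(fn) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()   # zero the counter max_memory_allocated reports
    fn()
    return torch.cuda.max_memory_allocated()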
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 24 | 1 |
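A hedged usage sketch of the language-code handling above against the public checkpoint (requires `transformers` and network access). The expected suffix follows `set_src_lang_special_tokens` in the row above, which appends `</s>` and the language code:

from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(tok.convert_ids_to_tokens(batch["input_ids"][0])[-2:])  # ['</s>', 'en_XX']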
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=False , a_=True , a_="None" , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : Dict = parent
__snake_case : Dict = batch_size
__snake_case : Optional[int] = seq_length
__snake_case : Union[str, Any] = is_training
__snake_case : int = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : Tuple = use_labels
__snake_case : Any = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : List[Any] = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Any = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : Optional[int] = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Dict = num_labels
__snake_case : List[str] = num_choices
__snake_case : int = relative_attention
__snake_case : List[Any] = position_biased_input
__snake_case : Dict = pos_att_type
__snake_case : Optional[Any] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : str = None
if self.use_input_mask:
__snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Any = None
__snake_case : int = None
__snake_case : Tuple = None
if self.use_labels:
__snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = self.get_config()
__snake_case : str = 3_00
return config
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DebertaModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ )[0]
__snake_case : str = model(a_ , token_type_ids=a_ )[0]
__snake_case : Dict = model(a_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = DebertaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : List[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = self.num_labels
__snake_case : Union[str, Any] = DebertaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : int = self.num_labels
__snake_case : str = DebertaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DebertaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = DebertaModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DebertaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
__snake_case : str = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__snake_case : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : Optional[int] = model(a_ , attention_mask=a_ )[0]
# compare the actual values for a slice.
__snake_case : int = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 24 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : List[Any] = hans_processors[task]()
__snake_case : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Tuple = label_list[2], label_list[1]
__snake_case : Dict = label_list
__snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
__snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case : Union[str, Any] = tf.data.Dataset.from_generator(
a_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
__snake_case : Tuple = '''%s-%s''' % (set_type, line[0])
__snake_case : Dict = line[5]
__snake_case : int = line[6]
__snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case : List[Any] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )}
__snake_case : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , )
__snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0
__snake_case : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
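# Hedged sketch of the FileLock-guarded caching used by the Dataset classes above: the
# first process to take the lock builds and saves the features, and every other process
# blocks on the lock and then loads the cached file. `build_features` is a hypothetical
# zero-argument callable standing in for the example loading + feature conversion;
# torch is assumed importable, as in the is_torch_available branch above.
def _cached_load_or_build(cache_path, build_features):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build_features()
        torch.save(features, cache_path)
        return features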
| 24 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
super().__init__(*a_ , **a_ )
requires_backends(self , '''vision''' )
self.check_model_type(a_ )
def __call__(self , a_ , **a_ ):
'''simple docstring'''
return super().__call__(a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , **a_ ):
'''simple docstring'''
return {}, {}, {}
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = load_image(a_ )
__snake_case : Optional[int] = image.size
__snake_case : int = self.image_processor(images=a_ , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Any = self.model(**a_ )
return model_outputs
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = model_outputs.predicted_depth
__snake_case : Tuple = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a_ )
__snake_case : Optional[Any] = prediction.squeeze().cpu().numpy()
__snake_case : str = (output * 2_55 / np.max(a_ )).astype('''uint8''' )
__snake_case : List[Any] = Image.fromarray(a_ )
__snake_case : List[str] = {}
__snake_case : Union[str, Any] = predicted_depth
__snake_case : Tuple = depth
return output_dict
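# Hedged usage sketch for the pipeline above. "Intel/dpt-large" is an illustrative
# depth-estimation checkpoint; running this downloads weights and the example image.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # "depth" is the PIL image built in postprocess; "predicted_depth" is the raw tensor
    result["depth"].save("depth.png")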
| 24 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ):
'''simple docstring'''
__snake_case : Any = vocab_size
__snake_case : str = max_position_embeddings
__snake_case : Any = d_model
__snake_case : List[str] = d_ff
__snake_case : Dict = d_ext
__snake_case : Optional[Any] = d_spout
__snake_case : int = num_switch_layers
__snake_case : List[Any] = num_ext_layers
__snake_case : Any = num_switch_layers + num_ext_layers
__snake_case : Optional[int] = num_heads
__snake_case : Tuple = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : Dict = dropout_rate
__snake_case : Optional[Any] = layer_norm_epsilon
__snake_case : Dict = router_bias
__snake_case : str = router_jitter_noise
__snake_case : List[str] = router_dtype
__snake_case : Union[str, Any] = router_ignore_padding_tokens
__snake_case : List[str] = output_hidden_states
__snake_case : Optional[Any] = output_attentions
__snake_case : Any = initializer_factor
__snake_case : int = output_router_logits
__snake_case : Union[str, Any] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
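# Hedged usage sketch: instantiating the config through the public transformers class
# and overriding one of the Switch-Transformer-style expert fields. Values are illustrative.
if __name__ == "__main__":
    from transformers import GPTSanJapaneseConfig

    config = GPTSanJapaneseConfig(num_experts=8)
    print(config.model_type)  # "gptsan-japanese", matching the class attribute above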
| 24 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
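# Hedged sketch of what the _LazyModule indirection buys: importing the package is
# cheap, and attribute access triggers the real submodule import on first use.
if __name__ == "__main__":
    from transformers.models import deberta

    config_cls = deberta.DebertaConfig  # resolved lazily through _import_structure
    print(config_cls.model_type)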
| 24 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->dict:
    """simple docstring"""
    result : dict = {}
    with open(_snake_case , '''r''' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                # map each line number to the first whitespace-separated token (index -> label)
                result[line_number] = words[0]
    return result
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : str = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : List[str] = shape_pointer.shape
# let's reduce dimension
__snake_case : int = value[0]
else:
__snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case : List[Any] = getattr(_snake_case , _snake_case )
__snake_case : int = value
else:
__snake_case : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : List[str] = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : Tuple = '''.'''.join([key, hf_param_name] )
else:
__snake_case : Optional[int] = key
__snake_case : List[Any] = value if '''lm_head''' in full_key else value[0]
SCREAMING_SNAKE_CASE : Tuple = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict:
"""simple docstring"""
__snake_case : Tuple = False
for key, mapped_key in MAPPING.items():
__snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case : int = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Tuple = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[str] = '''weight_v'''
elif "bias" in name:
__snake_case : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[Any] = '''weight'''
else:
__snake_case : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
return is_used
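# Hedged sketch of the "*" wildcard substitution performed above: the fairseq layer index
# is recovered from the flat parameter name and spliced into the HF key template, e.g.
#
#     name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
#     mapped_key = "wav2vec2." + "encoder.layers.*.attention.k_proj"
#     layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]   # -> "3"
#     mapped_key.replace("*", layer_index)  # "wav2vec2.encoder.layers.3.attention.k_proj"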
def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Union[str, Any] = fairseq_model.state_dict()
__snake_case : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Optional[int] = int(items[0] )
__snake_case : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
__snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case )
else:
__snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[int] = read_txt_into_dict(_snake_case )
__snake_case : List[Any] = idalabel
__snake_case : int = WavaVecaForSequenceClassification(_snake_case )
__snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : int = target_dict.bos_index
__snake_case : Tuple = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : Any = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Dict = 0
__snake_case : List[Any] = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
__snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False
__snake_case : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
__snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = WavaVecaForCTC(_snake_case )
else:
__snake_case : Tuple = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' )
__snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case )
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
__snake_case : int = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
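# Hedged CLI sketch (script name and paths are placeholders):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned
#
# Without --not_finetuned the checkpoint is treated as a fine-tuned CTC model, which
# additionally requires --dict_path pointing at the fairseq dictionary.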
| 24 | 1 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase ( _snake_case : str ) ->Optional[int]:
"""simple docstring"""
return EnvironmentCommand()
def lowercase ( _snake_case : List[str] ) ->Optional[Any]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE (a_ ):
'''simple docstring'''
__snake_case : Dict = parser.add_parser('''env''' )
download_parser.set_defaults(func=a_ )
download_parser.add_argument(
'''--accelerate-config_file''' , default=a_ , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=a_ )
def __init__(self , a_ , *a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = accelerate_config_file
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''not installed'''
if is_safetensors_available():
import safetensors
__snake_case : str = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
__snake_case : Any = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
__snake_case : List[str] = '''not installed'''
__snake_case : str = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__snake_case : str = accelerate.__version__
# Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
__snake_case : Union[str, Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
__snake_case : str = (
'''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(a_ , a_ )
else f"""\t{accelerate_config}"""
)
__snake_case : Any = '''not installed'''
__snake_case : Union[str, Any] = '''NA'''
if is_torch_available():
import torch
__snake_case : int = torch.__version__
__snake_case : List[Any] = torch.cuda.is_available()
__snake_case : int = '''not installed'''
__snake_case : int = '''NA'''
if is_tf_available():
import tensorflow as tf
__snake_case : List[str] = tf.__version__
try:
# deprecated in v2.1
__snake_case : str = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__snake_case : Any = bool(tf.config.list_physical_devices('''GPU''' ) )
__snake_case : Optional[int] = '''not installed'''
__snake_case : List[str] = '''not installed'''
__snake_case : Dict = '''not installed'''
__snake_case : str = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__snake_case : Union[str, Any] = flax.__version__
__snake_case : List[str] = jax.__version__
__snake_case : Union[str, Any] = jaxlib.__version__
__snake_case : List[str] = jax.lib.xla_bridge.get_backend().platform
__snake_case : Union[str, Any] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f"""{safetensors_version}""",
'''Accelerate version''': f"""{accelerate_version}""",
'''Accelerate config''': f"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': f"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': f"""{flax_version} ({jax_backend})""",
'''Jax version''': f"""{jax_version}""",
'''JaxLib version''': f"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(a_ ) )
return info
@staticmethod
def SCREAMING_SNAKE_CASE (a_ ):
'''simple docstring'''
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 24 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
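# Hedged behavior sketch: with any of the backends missing, merely instantiating the
# dummy raises an ImportError naming transformers, torch and note_seq.
if __name__ == "__main__":
    try:
        _UpperCAmelCase()
    except ImportError as err:
        print(err)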
| 24 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : str = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""MobileViTFeatureExtractor"""]
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
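# Hedged usage sketch of the processor under test. "apple/mobilevit-small" is the
# reference checkpoint; running this downloads its preprocessing config and assumes
# the vision backend is available (as the tests above require).
if __name__ == "__main__":
    from transformers import MobileViTImageProcessor

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
    dummy = Image.new("RGB", (320, 240))
    pixel_values = processor(images=dummy, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # (1, 3, crop_height, crop_width)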
| 24 | 1 |
"""simple docstring"""
def binary_multiply ( a : int , b : int ) ->int:
    """simple docstring"""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply ( a : int , b : int , c : int ) ->int:
    """simple docstring"""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
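# Quick demo of both variants; the modular identity holds because a % c is taken on
# every accumulation step.
if __name__ == "__main__":
    assert binary_multiply(7, 13) == 91
    assert binary_mod_multiply(7, 13, 10) == (7 * 13) % 10
    print("multiplication via repeated doubling checks out")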
| 24 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`''' , )
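    # bitsandbytes integration: how prepare() treats 8-bit quantized models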
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
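    # an 8-bit model that offloads part of its modules to CPU cannot be prepared and must raise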
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
        # This should not work and should raise a ValueError
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
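    # an 8-bit model mapped across several GPUs is likewise rejected by prepare()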
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
        # This should not work and should raise a ValueError
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
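    # in contrast to the test above, this configuration is expected to prepare without raising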
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(a_ )
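    # from_list should reproduce the records' column names and row values exactly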
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self._create_example_records()
__snake_case : Optional[int] = Dataset.from_list(a_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(a_ ):
self.assertDictEqual(a_ , example_records[i] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self._create_example_records()
__snake_case : Optional[Any] = Dataset.from_list(a_ )
__snake_case : Tuple = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def SCREAMING_SNAKE_CASE (self ): # checks what happens with missing columns
'''simple docstring'''
__snake_case : Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
__snake_case : List[str] = Dataset.from_list(a_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def SCREAMING_SNAKE_CASE (self ): # checks if the type can be inferred from the second record
'''simple docstring'''
__snake_case : List[Any] = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
__snake_case : Any = Dataset.from_list(a_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
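    # an empty record list still yields a valid dataset with zero rows and no columns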
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Dataset.from_list([] )
self.assertEqual(len(a_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int ) ->str:
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
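    # bit width of |number|; bin() prefixes negative values with '-0b', hence the [3:] slices below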
__snake_case : Any = len(bin(_snake_case )[3:] )
__snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
__snake_case : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
SCREAMING_SNAKE_CASE : List[str] = namedtuple("""CoinsDistribResult""", """moves excess""")
def lowercase ( _snake_case : TreeNode | None ) ->int:
"""simple docstring"""
if root is None:
return 0
# Validation
def count_nodes(_snake_case : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_snake_case : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_snake_case ) != count_coins(_snake_case ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
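    # excess = coins in a subtree minus nodes in it; each unit of excess crosses the parent edge once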
def get_distrib(_snake_case : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__snake_case , __snake_case : str = get_distrib(node.left )
__snake_case , __snake_case : Optional[Any] = get_distrib(node.right )
__snake_case : Union[str, Any] = 1 - left_distrib_excess
__snake_case : Optional[int] = 1 - right_distrib_excess
__snake_case : str = (
left_distrib_moves
+ right_distrib_moves
+ abs(_snake_case )
+ abs(_snake_case )
)
__snake_case : List[str] = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_snake_case , _snake_case )
return get_distrib(_snake_case )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
def lowercase ( ) ->int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
for b in range(_snake_case , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def lowercase ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Any ) ->List[str]:
"""simple docstring"""
__snake_case : List[Any] = WavaVecaForSequenceClassification.from_pretrained(_snake_case , config=_snake_case )
__snake_case : Union[str, Any] = downstream_dict['''projector.weight''']
__snake_case : Dict = downstream_dict['''projector.bias''']
__snake_case : Union[str, Any] = downstream_dict['''model.post_net.linear.weight''']
__snake_case : Optional[int] = downstream_dict['''model.post_net.linear.bias''']
return model
def lowercase ( _snake_case : Optional[Any] , _snake_case : str , _snake_case : str ) ->int:
"""simple docstring"""
__snake_case : str = WavaVecaForAudioFrameClassification.from_pretrained(_snake_case , config=_snake_case )
__snake_case : Dict = downstream_dict['''model.linear.weight''']
__snake_case : Any = downstream_dict['''model.linear.bias''']
return model
def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[int] ) ->List[Any]:
"""simple docstring"""
__snake_case : int = WavaVecaForXVector.from_pretrained(_snake_case , config=_snake_case )
__snake_case : Optional[Any] = downstream_dict['''connector.weight''']
__snake_case : Dict = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__snake_case : List[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
__snake_case : Union[str, Any] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
__snake_case : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__snake_case : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__snake_case : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__snake_case : str = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__snake_case : List[str] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowercase ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : int ) ->List[str]:
"""simple docstring"""
__snake_case : Tuple = torch.load(_snake_case , map_location='''cpu''' )
__snake_case : Tuple = checkpoint['''Downstream''']
__snake_case : Tuple = WavaVecaConfig.from_pretrained(_snake_case )
__snake_case : str = WavaVecaFeatureExtractor.from_pretrained(
_snake_case , return_attention_mask=_snake_case , do_normalize=_snake_case )
__snake_case : Tuple = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__snake_case : str = convert_classification(_snake_case , _snake_case , _snake_case )
elif arch.endswith('''ForAudioFrameClassification''' ):
__snake_case : Tuple = convert_diarization(_snake_case , _snake_case , _snake_case )
elif arch.endswith('''ForXVector''' ):
__snake_case : Dict = convert_xvector(_snake_case , _snake_case , _snake_case )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
__snake_case : Dict = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_snake_case )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int = 100 ) ->int:
"""simple docstring"""
__snake_case : str = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : Dict , _snake_case : Any , _snake_case : Tuple=None ) ->Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
__snake_case : List[str] = nn.Parameter(_snake_case )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
__snake_case : List[str] = nn.Parameter(_snake_case )
def lowercase ( _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Any ) ->int:
"""simple docstring"""
__snake_case : List[Any] = np.asarray(weights[0] )
__snake_case : Dict = np.asarray(weights[1] )
__snake_case : Union[str, Any] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(_snake_case ).transpose(1 , 2 ).contiguous().view(-1 , _snake_case ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_snake_case ).transpose(1 , 2 ).contiguous().view(-1 , _snake_case ) , )
set_param(
torch_layer.output.dense , torch.tensor(_snake_case ).view(-1 , _snake_case ).contiguous().transpose(0 , 1 ) , )
def lowercase ( _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Dict ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = np.asarray(weights[0] )
__snake_case : Any = np.asarray(weights[1] )
__snake_case : str = np.asarray(weights[2] )
__snake_case : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(_snake_case ).transpose(1 , 2 ).contiguous().view(-1 , _snake_case ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(_snake_case ).transpose(1 , 2 ).contiguous().view(-1 , _snake_case ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_snake_case ).transpose(1 , 2 ).contiguous().view(-1 , _snake_case ) , )
set_param(
torch_layer.output.dense , torch.tensor(_snake_case ).view(-1 , _snake_case ).contiguous().transpose(0 , 1 ) , )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Union[str, Any] ) ->Dict:
"""simple docstring"""
__snake_case : Dict = weights[0][0][0]
__snake_case : Any = np.asarray(layer_norm_a[0] )
__snake_case : Any = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(_snake_case ) , torch.tensor(_snake_case ) , )
# lsh weights + output
__snake_case : List[str] = weights[0][1]
if len(_snake_case ) < 4:
set_layer_weights_in_torch_lsh(_snake_case , torch_block.attention , _snake_case )
else:
set_layer_weights_in_torch_local(_snake_case , torch_block.attention , _snake_case )
    # intermediate weights
__snake_case : Tuple = weights[2][0][1][2]
# Chunked Feed Forward
if len(_snake_case ) == 4:
__snake_case : str = intermediate_weights[2]
# layernorm 2
__snake_case : Union[str, Any] = np.asarray(intermediate_weights[0][0] )
__snake_case : Optional[int] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(_snake_case ) , torch.tensor(_snake_case ) , )
# intermediate dense
__snake_case : int = np.asarray(intermediate_weights[1][0] )
__snake_case : Tuple = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(_snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(_snake_case ) , )
# intermediate out
__snake_case : Tuple = np.asarray(intermediate_weights[4][0] )
__snake_case : Optional[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(_snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(_snake_case ) , )
def lowercase ( _snake_case : str , _snake_case : Tuple , _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Optional[int] = torch_model.reformer
# word embeds
__snake_case : Dict = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(_snake_case ) , )
if isinstance(weights[3] , _snake_case ):
__snake_case : Dict = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__snake_case : List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
__snake_case : Optional[int] = nn.Parameter(torch.tensor(_snake_case ) )
__snake_case : str = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
_snake_case ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__snake_case : List[Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(_snake_case , _snake_case , _snake_case )
# output layer norm
__snake_case : List[str] = np.asarray(weights[7][0] )
__snake_case : Dict = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(_snake_case ) , torch.tensor(_snake_case ) , )
# output embeddings
__snake_case : Any = np.asarray(weights[9][0] )
__snake_case : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(_snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(_snake_case ) , )
def lowercase ( _snake_case : Tuple , _snake_case : int , _snake_case : List[Any] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = ReformerConfig.from_json_file(_snake_case )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case : Any = ReformerModelWithLMHead(_snake_case )
with open(_snake_case , '''rb''' ) as f:
__snake_case : Tuple = pickle.load(_snake_case )['''weights''']
set_model_weights_in_torch(_snake_case , _snake_case , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 24 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
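    # resolve data_files through the download manager and emit one SplitGenerator per split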
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
| 24 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE : Optional[Any] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase ( _snake_case : List[str] , _snake_case : Any , _snake_case : int=None ) ->Tuple:
"""simple docstring"""
if rng is None:
__snake_case : str = random.Random()
__snake_case : Optional[int] = 1
for dim in shape:
total_dims *= dim
__snake_case : Optional[int] = []
for _ in range(_snake_case ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__snake_case : List[str] = np.array(_snake_case , dtype=jnp.intaa ).reshape(_snake_case )
return output
def lowercase ( _snake_case : Optional[int] , _snake_case : List[Any]=None ) ->Any:
"""simple docstring"""
__snake_case : Optional[int] = ids_tensor(_snake_case , vocab_size=2 , rng=_snake_case )
# make sure that at least one token is attended to for each batch
__snake_case : Optional[Any] = 1
return attn_mask
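# generation-test mixin: most checks also run jit(model.generate) and require identical sequences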
@require_flax
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =None
lowerCamelCase__ =()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__snake_case : str = 2
__snake_case : Tuple = inputs['''input_ids'''].shape[-1] // 2
__snake_case : int = inputs['''input_ids'''][:max_batch_size, :sequence_length]
__snake_case : int = jnp.ones_like(a_ )
__snake_case : Optional[int] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__snake_case : Dict = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__snake_case : str = config.eos_token_id
return config, input_ids, attention_mask, max_length
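    # greedy generation from the Flax model should match its PyTorch counterpart token for token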
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Any = self._get_input_ids_and_config()
__snake_case : Optional[Any] = False
__snake_case : Tuple = max_length
__snake_case : int = 0
for model_class in self.all_generative_model_classes:
__snake_case : Optional[Any] = model_class(a_ )
__snake_case : Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
__snake_case : Any = getattr(a_ , a_ )
__snake_case : Optional[Any] = pt_model_class(a_ ).eval()
__snake_case : Union[str, Any] = load_flax_weights_in_pytorch_model(a_ , flax_model.params )
__snake_case : Optional[Any] = flax_model.generate(a_ ).sequences
__snake_case : Dict = pt_model.generate(torch.tensor(a_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__snake_case : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_input_ids_and_config()
__snake_case : Any = False
__snake_case : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__snake_case : Dict = model_class(a_ )
__snake_case : Union[str, Any] = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Tuple = jit(model.generate )
__snake_case : Tuple = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = self._get_input_ids_and_config()
__snake_case : Dict = True
__snake_case : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__snake_case : List[str] = model_class(a_ )
__snake_case : Tuple = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Union[str, Any] = jit(model.generate )
__snake_case : List[str] = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : str = self._get_input_ids_and_config()
__snake_case : List[str] = False
__snake_case : List[Any] = max_length
__snake_case : List[str] = 2
for model_class in self.all_generative_model_classes:
__snake_case : Optional[int] = model_class(a_ )
__snake_case : str = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : List[Any] = jit(model.generate )
__snake_case : List[str] = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_input_ids_and_config()
__snake_case : Optional[Any] = False
__snake_case : Dict = max_length
__snake_case : List[str] = 2
__snake_case : List[str] = 2
for model_class in self.all_generative_model_classes:
__snake_case : Optional[Any] = model_class(a_ )
__snake_case : str = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = self._get_input_ids_and_config()
__snake_case : List[str] = True
__snake_case : Tuple = max_length
__snake_case : Optional[int] = 0.8
__snake_case : Dict = 10
__snake_case : Tuple = 0.3
__snake_case : List[Any] = 1
__snake_case : Tuple = 8
__snake_case : Optional[int] = 9
for model_class in self.all_generative_model_classes:
__snake_case : Any = model_class(a_ )
__snake_case : Any = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Any = jit(model.generate )
__snake_case : int = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : int = self._get_input_ids_and_config()
__snake_case : Optional[int] = max_length
__snake_case : int = 1
__snake_case : Dict = 8
__snake_case : Dict = 9
for model_class in self.all_generative_model_classes:
__snake_case : Tuple = model_class(a_ )
__snake_case : Union[str, Any] = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Optional[Any] = jit(model.generate )
__snake_case : Optional[Any] = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = self._get_input_ids_and_config()
__snake_case : Optional[int] = max_length
__snake_case : Tuple = 2
__snake_case : Any = 1
__snake_case : Any = 8
__snake_case : Any = 9
for model_class in self.all_generative_model_classes:
__snake_case : Optional[Any] = model_class(a_ )
__snake_case : Any = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Optional[Any] = jit(model.generate )
__snake_case : Optional[int] = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Any = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case : Optional[int] = attention_mask.at[(0, 0)].set(0 )
__snake_case : str = False
__snake_case : int = max_length
for model_class in self.all_generative_model_classes:
__snake_case : Union[str, Any] = model_class(a_ )
__snake_case : int = model.generate(a_ , attention_mask=a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : int = jit(model.generate )
__snake_case : Tuple = jit_generate(a_ , attention_mask=a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case : Any = attention_mask.at[(0, 0)].set(0 )
__snake_case : Optional[Any] = True
__snake_case : List[str] = max_length
for model_class in self.all_generative_model_classes:
__snake_case : str = model_class(a_ )
__snake_case : Dict = model.generate(a_ , attention_mask=a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Optional[int] = jit(model.generate )
__snake_case : List[Any] = jit_generate(a_ , attention_mask=a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case : Any = attention_mask.at[(0, 0)].set(0 )
__snake_case : Union[str, Any] = 2
__snake_case : int = max_length
for model_class in self.all_generative_model_classes:
__snake_case : List[str] = model_class(a_ )
__snake_case : Dict = model.generate(a_ , attention_mask=a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
__snake_case : Any = jit(model.generate )
__snake_case : Optional[int] = jit_generate(a_ , attention_mask=a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
__snake_case : int = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__snake_case : Union[str, Any] = '''Hello world'''
__snake_case : Optional[Any] = tokenizer(a_ , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(a_ , '''do_samples''' ):
model.generate(a_ , do_samples=a_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(a_ , '''foo''' ):
__snake_case : Optional[int] = {'''foo''': '''bar'''}
model.generate(a_ , **a_ )
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__snake_case : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(a_ )
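    # eager-mode inference benchmark on a tiny GPT-2 checkpoint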
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = '''sshleifer/tiny-gpt2'''
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''sgugger/tiny-distilbert-classification'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , )
__snake_case : Optional[Any] = TensorFlowBenchmark(a_ )
__snake_case : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Any = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ )
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Dict = TensorFlowBenchmark(a_ , [config] )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : int = TensorFlowBenchmark(a_ )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
__snake_case : Tuple = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] )
__snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(a_ )
benchmark.run()
self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(a_ ):
self.assertTrue(hasattr(a_ , '''sequential''' ) )
self.assertTrue(hasattr(a_ , '''cumulative''' ) )
self.assertTrue(hasattr(a_ , '''current''' ) )
self.assertTrue(hasattr(a_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ )
__snake_case : Optional[int] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
| 24 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE : Union[str, Any] = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE : Tuple = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE : List[Any] = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE : str = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE : Union[str, Any] = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE : List[str] = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , a_ = 3 , a_ = 1 , a_ = 1 , a_ = "relu" , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__snake_case : Any = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__snake_case : str = tf.keras.layers.ConvaD(
filters=a_ , kernel_size=a_ , strides=a_ , padding='''VALID''' , groups=a_ , use_bias=a_ , name='''convolution''' , )
__snake_case : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
__snake_case : int = ACTaFN[activation] if activation is not None else tf.identity
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = self.convolution(self.padding(a_ ) )
__snake_case : int = self.normalization(a_ )
__snake_case : Optional[int] = self.activation(a_ )
return hidden_state
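# the stem: checks the channel count, converts NCHW input to NHWC, then applies the embedding conv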
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Tuple = config.num_channels
__snake_case : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = shape_list(a_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__snake_case : Dict = tf.transpose(a_ , perm=(0, 2, 3, 1) )
__snake_case : List[Any] = self.embedder(a_ )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , a_ = 2 , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Dict = tf.keras.layers.ConvaD(
filters=a_ , kernel_size=1 , strides=a_ , use_bias=a_ , name='''convolution''' )
__snake_case : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ):
'''simple docstring'''
return self.normalization(self.convolution(a_ ) , training=a_ )
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , a_ , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name='''pooler''' )
__snake_case : Union[str, Any] = [
tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.pooler(a_ )
for layer_module in self.attention:
__snake_case : List[str] = layer_module(a_ )
__snake_case : List[str] = hidden_state * pooled
return hidden_state
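# residual blocks: the X layer is a grouped-conv bottleneck, the Y layer adds squeeze-and-excitation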
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = 1 , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Union[str, Any] = in_channels != out_channels or stride != 1
__snake_case : List[str] = max(1 , out_channels // config.groups_width )
__snake_case : Union[str, Any] = (
TFRegNetShortCut(a_ , stride=a_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__snake_case : List[Any] = [
TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name='''layer.2''' ),
]
__snake_case : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = hidden_state
for layer_module in self.layers:
__snake_case : Dict = layer_module(a_ )
__snake_case : Any = self.shortcut(a_ )
hidden_state += residual
__snake_case : Dict = self.activation(a_ )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = 1 , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Optional[int] = in_channels != out_channels or stride != 1
__snake_case : List[Any] = max(1 , out_channels // config.groups_width )
__snake_case : List[str] = (
TFRegNetShortCut(a_ , stride=a_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
__snake_case : List[Any] = [
TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(a_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name='''layer.3''' ),
]
__snake_case : List[str] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = hidden_state
for layer_module in self.layers:
__snake_case : Optional[int] = layer_module(a_ )
__snake_case : List[str] = self.shortcut(a_ )
hidden_state += residual
__snake_case : Tuple = self.activation(a_ )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = 2 , a_ = 2 , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Union[str, Any] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
__snake_case : Dict = [
# downsampling is done in the first layer with stride of 2
layer(a_ , a_ , a_ , stride=a_ , name='''layers.0''' ),
*[layer(a_ , a_ , a_ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for layer_module in self.layers:
__snake_case : Tuple = layer_module(a_ )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , a_ , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
__snake_case : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(a_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(a_ , a_ , a_ , depth=a_ , name=f"""stages.{i+1}""" ) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False , a_ = True ):
'''simple docstring'''
__snake_case : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case : Optional[Any] = hidden_states + (hidden_state,)
__snake_case : Tuple = stage_module(a_ )
if output_hidden_states:
__snake_case : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=a_ , hidden_states=a_ )
@keras_serializable
class _UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase__ =RegNetConfig
def __init__(self , a_ , **a_ ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Optional[Any] = config
__snake_case : Any = TFRegNetEmbeddings(a_ , name='''embedder''' )
__snake_case : Dict = TFRegNetEncoder(a_ , name='''encoder''' )
__snake_case : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name='''pooler''' )
@unpack_inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = None , a_ = False , ):
'''simple docstring'''
__snake_case : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Any = self.embedder(a_ , training=a_ )
__snake_case : str = self.encoder(
a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ )
__snake_case : Tuple = encoder_outputs[0]
__snake_case : Union[str, Any] = self.pooler(a_ )
        # Change to NCHW output format to have uniformity in the modules
__snake_case : str = tf.transpose(a_ , perm=(0, 3, 1, 2) )
__snake_case : List[Any] = tf.transpose(a_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__snake_case : List[Any] = tuple([tf.transpose(a_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a_ , pooler_output=a_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =RegNetConfig
lowerCamelCase__ ='regnet'
lowerCamelCase__ ='pixel_values'
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
SCREAMING_SNAKE_CASE : Optional[int] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE : Union[str, Any] = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.', __snake_case, )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , *a_ , **a_ ):
'''simple docstring'''
super().__init__(a_ , *a_ , **a_ )
__snake_case : List[str] = TFRegNetMainLayer(a_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = None , a_=False , ):
'''simple docstring'''
__snake_case : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Any = self.regnet(
pixel_values=a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ', __snake_case, )
class _UpperCAmelCase ( __snake_case, __snake_case ):
'''simple docstring'''
def __init__(self , a_ , *a_ , **a_ ):
'''simple docstring'''
super().__init__(a_ , *a_ , **a_ )
__snake_case : Tuple = config.num_labels
__snake_case : List[Any] = TFRegNetMainLayer(a_ , name='''regnet''' )
# classification head
__snake_case : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE (self , a_ = None , a_ = None , a_ = None , a_ = None , a_=False , ):
'''simple docstring'''
__snake_case : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Dict = self.regnet(
a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ )
__snake_case : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
__snake_case : Dict = self.classifier[0](a_ )
__snake_case : str = self.classifier[1](a_ )
__snake_case : List[str] = None if labels is None else self.hf_compute_loss(labels=a_ , logits=a_ )
if not return_dict:
__snake_case : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a_ , logits=a_ , hidden_states=outputs.hidden_states )
| 24 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
__all__ = [
    """Timeout""",
    """BaseFileLock""",
    """WindowsFileLock""",
    """UnixFileLock""",
    """SoftFileLock""",
    """FileLock""",
]
__version__ = """3.0.12"""
_logger = None
def logger ( ) ->logging.Logger:
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout ( TimeoutError ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
        self.lock_file = lock_file
return None
def __str__(self ):
'''simple docstring'''
        temp = f"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
        self.lock = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock :
'''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 2_55
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
return None
    @property
    def lock_file (self ):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout (self ):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout (self , a_ ):
        '''simple docstring'''
        self._timeout = float(a_ )
        return None
    def _acquire (self ):
        '''simple docstring'''
        raise NotImplementedError()
    def _release (self ):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked (self ):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire (self , timeout=None , poll_intervall=0.05 ):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
    def release (self , force=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
                    self._lock_counter = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
def __del__(self ):
'''simple docstring'''
        self.release(force=True )
return None
    def hash_filename_if_too_long (self , path , max_length ):
        '''simple docstring'''
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock ( BaseFileLock ):
'''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
    def _acquire (self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
return None
    def _release (self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock ( BaseFileLock ):
'''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire (self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
return None
    def _release (self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
return None
class SoftFileLock ( BaseFileLock ):
'''simple docstring'''
    def _acquire (self ):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
return None
    def _release (self ):
        '''simple docstring'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
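# ------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the lock
# counter in BaseFileLock makes locks reentrant, so nested `with` blocks on
# the same object are safe and the OS-level lock is only released when the
# outermost context exits.
def _example_nested_locking(path="""app.lock"""):
    lock = FileLock(path , timeout=1 )
    with lock:
        with lock:  # re-enter: _lock_counter goes 1 -> 2, no second OS-level acquire
            assert lock.is_locked
    assert not lock.is_locked  # counter back to 0, underlying file lock released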
| 24 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
SCREAMING_SNAKE_CASE : Optional[Any] = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap ( checkpoint_path , enable_fusion=False ) ->Optional[Any]:
    """simple docstring"""
    model , model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict ( state_dict ) ->Optional[Any]:
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r'''.*sequential.(\d+).*'''
    text_projection_pattern = r'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"""_projection.{projection_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('''qkv''' , '''query''' )] = query_layer
            model_state_dict[key.replace('''qkv''' , '''key''' )] = key_layer
            model_state_dict[key.replace('''qkv''' , '''value''' )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
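# Illustrative sanity check (not part of the original script): the qkv split
# above carves a fused (3 * dim, ...) tensor into equal query/key/value thirds,
# in that order, which is how the CLAP audio branch packs its attention weights.
def _demo_qkv_split():
    mixed_qkv = torch.arange(12.0 ).reshape(6 , 2 )  # pretend dim = 2
    qkv_dim = mixed_qkv.size(0 ) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    assert query.shape == key.shape == value.shape == (2, 2)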
def convert_clap_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ) ->List[Any]:
    """simple docstring"""
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 24 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=10_00 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs (self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config (self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common (self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        return True
    def setUp (self ):
        '''simple docstring'''
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config (self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained (self ):
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head (self ):
        '''simple docstring'''
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_68] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 24 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = """http://www.mocksite.com/file1.txt"""
CONTENT = """\"text\": [\"foo\", \"foo\"]"""
HASH = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse :
    '''simple docstring'''
    status_code = 200
    headers = {'Content-Length': '100'}
    cookies = {}
    def iter_content (self , **kwargs ):
        '''simple docstring'''
        return [bytes(CONTENT , '''utf-8''' )]
def mock_request ( *args , **kwargs ) ->Union[str, Any]:
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def test_download_manager_download ( urls_type , tmp_path , monkeypatch ) ->Optional[int]:
    """simple docstring"""
    import requests
    monkeypatch.setattr(requests , '''request''' , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {'''train''': url}
    dataset_name = '''dummy'''
    cache_subdir = '''downloads'''
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=True , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('''.json''' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def test_download_manager_extract ( paths_type , xz_file , text_file ) ->List[str]:
    """simple docstring"""
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {'''train''': filename}
    dataset_name = '''dummy'''
    cache_dir_root = xz_file.parent
    extracted_subdir = '''extracted'''
    download_config = DownloadConfig(
        cache_dir=cache_dir_root , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl ( path , file ) ->Optional[Any]:
    """simple docstring"""
    assert path.endswith('''.jsonl''' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('''utf-8''' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def test_iter_archive_path ( archive_jsonl , request ) ->Dict:
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def test_iter_archive_file ( archive_nested_jsonl , request ) ->str:
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files ( data_dir_with_hidden_files ) ->Dict:
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester ( object ):
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs (self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common (self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp (self ):
        '''simple docstring'''
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config (self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_distilbert_model (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained (self ):
        '''simple docstring'''
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change (self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''traced_model.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp , '''traced_model.pt''' ) , map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class DistilBertModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding (self ):
        '''simple docstring'''
        model = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 24 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self , vocab_size=5_02_65 , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads (self ):
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size (self ):
        '''simple docstring'''
        return self.d_model
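# Usage sketch (illustrative): `attribute_map` above makes `num_attention_heads`
# and `hidden_size` read-through aliases for `encoder_attention_heads` and
# `d_model`, and the two properties expose the same values explicitly.
#
#     config = PegasusConfig(d_model=512, encoder_attention_heads=8)
#     assert config.num_attention_heads == 8
#     assert config.hidden_size == 512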
| 24 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch ( tf_checkpoint_path , config_path , pytorch_dump_path ) ->List[Any]:
    """simple docstring"""
    def get_masked_lm_array(name ):
        full_name = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name ):
        full_name = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index , name ):
        full_name = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index , name , original_shape ):
        full_name = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '''_attention_layer_norm/gamma''' )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '''_attention_layer_norm/beta''' )
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , '''_intermediate_dense/kernel''' )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , '''_intermediate_dense/bias''' )
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , '''_output_dense/kernel''' )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , '''_output_dense/bias''' )
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '''_output_layer_norm/gamma''' )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '''_output_layer_norm/beta''' )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('''_position_embedding_layer/embeddings''' )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('''_type_embedding_layer/embeddings''' )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('''_embedding_norm_layer/gamma''' )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('''_embedding_norm_layer/beta''' )
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array('''dense/kernel''' )
    lm_head.dense.bias.data = get_masked_lm_array('''dense/bias''' )
    lm_head.LayerNorm.weight.data = get_masked_lm_array('''layer_norm/gamma''' )
    lm_head.LayerNorm.bias.data = get_masked_lm_array('''layer_norm/beta''' )
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('''embedding_table''' )
    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array('''_pooler_layer/kernel''' )
    model.bert.pooler.dense.bias.data = get_encoder_array('''_pooler_layer/bias''' )
    # Export final model
    model.save_pretrained(pytorch_dump_path )
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
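# Illustrative check (not part of the converter): TF stores dense kernels as
# (in_features, out_features), while torch.nn.Linear expects (out, in), hence
# the .transpose() applied to every "kernel" array in the helpers above.
def _demo_kernel_transpose():
    import numpy as np
    tf_kernel = np.zeros((7_68, 30_72) )  # TF layout: (in, out)
    pt_weight = torch.from_numpy(tf_kernel.transpose() )  # PyTorch layout: (out, in)
    assert pt_weight.shape == (30_72, 7_68)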
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests ( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet (self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model
    @property
    def dummy_vq_model (self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
        return model
    @property
    def dummy_text_encoder (self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
    def test_inference_uncond (self ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='''numpy''' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1E-2 if torch_device != '''mps''' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_uncond (self ):
        '''simple docstring'''
        ldm = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        tolerance = 1E-2 if torch_device != '''mps''' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 24 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader ( AbstractDatasetInputStream ):
'''simple docstring'''
    def __init__(self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
    def read (self ):
        '''simple docstring'''
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter :
'''simple docstring'''
    def __init__(self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs , ):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write (self ):
        '''simple docstring'''
        _ = self.to_sql_kwargs.pop('''sql''' , None )
        _ = self.to_sql_kwargs.pop('''con''' , None )
        index = self.to_sql_kwargs.pop('''index''' , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql (self , args ):
        '''simple docstring'''
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write (self , index , **to_sql_kwargs ):
        '''simple docstring'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                    written += num_rows
        return written
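# Usage sketch (illustrative; assumes an existing `datasets.Dataset` instance):
# this writer is normally driven by `Dataset.to_sql`, but it can be used
# directly as well.
#
#     import sqlite3
#     con = sqlite3.connect("data.db")
#     written = SqlDatasetWriter(dataset, "my_table", con, batch_size=1000).write()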
| 24 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests ( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet (self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model
    def test_inference (self ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10 (self ):
        '''simple docstring'''
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 24 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'lxmert'
    attribute_map = {}
    def __init__(self , vocab_size=3_05_22 , hidden_size=7_68 , num_attention_heads=12 , num_qa_labels=95_00 , num_object_labels=16_00 , num_attr_labels=4_00 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=20_48 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
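# Usage sketch (illustrative): unlike most configs, `num_hidden_layers` here is
# a dict split across the three encoders rather than a single int.
#
#     config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#     config.num_hidden_layers  # {'vision': 5, 'cross_encoder': 5, 'language': 9}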
| 24 | 1 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def lowercase ( _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Optional[Any] ) ->Any:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Optional[int] = getattr(_snake_case , _snake_case )
if weight_type is not None:
__snake_case : List[str] = getattr(_snake_case , _snake_case ).shape
else:
__snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : Optional[int] = value
elif weight_type == "weight_g":
__snake_case : Union[str, Any] = value
elif weight_type == "weight_v":
__snake_case : Tuple = value
elif weight_type == "bias":
__snake_case : Optional[Any] = value
else:
__snake_case : Dict = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : Optional[Any] ) ->int:
"""simple docstring"""
__snake_case : str = []
__snake_case : int = fairseq_model.state_dict()
__snake_case : str = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
__snake_case : List[Any] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__snake_case : List[str] = True
if "*" in mapped_key:
__snake_case : Optional[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Dict = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : List[Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : Optional[Any] = '''weight_v'''
elif "bias" in name:
__snake_case : Union[str, Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[str] = '''weight'''
else:
__snake_case : Dict = None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Tuple , _snake_case : List[str] , _snake_case : str ) ->str:
"""simple docstring"""
__snake_case : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : Any = name.split('''.''' )
__snake_case : Tuple = int(items[0] )
__snake_case : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : List[Any]=None , _snake_case : int=None , _snake_case : Union[str, Any]=True ) ->str:
"""simple docstring"""
if config_path is not None:
__snake_case : int = UniSpeechSatConfig.from_pretrained(_snake_case )
else:
__snake_case : str = UniSpeechSatConfig()
__snake_case : Optional[Any] = ''''''
if is_finetuned:
__snake_case : Optional[Any] = UniSpeechSatForCTC(_snake_case )
else:
__snake_case : Tuple = UniSpeechSatForPreTraining(_snake_case )
__snake_case , __snake_case , __snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__snake_case : Any = model[0].eval()
recursively_load_weights(_snake_case , _snake_case )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
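# A minimal standalone sketch (hypothetical name `parse_conv_name`) of how the
# conv-layer loader above splits a fairseq parameter name into its layer id
# and type id (type 0: the conv itself, type 2: its norm):
def parse_conv_name(full_name: str) -> tuple:
    items = full_name.split("conv_layers.")[-1].split(".")
    return int(items[0]), int(items[1])

assert parse_conv_name("feature_extractor.conv_layers.0.2.weight") == (0, 2)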
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
            __snake_case : Union[str, Any] = dp[i - 1][j]  # sum j already reachable without arr[i - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j]:
__snake_case : List[str] = s - 2 * j
break
return diff
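# An equivalent set-based sketch (hypothetical name `min_partition_diff`) of
# the same idea: collect every reachable subset sum, then the largest j <= s/2
# yields the minimum partition difference s - 2*j.
def min_partition_diff(arr: list) -> int:
    s = sum(arr)
    reachable = {0}
    for x in arr:
        reachable |= {r + x for r in reachable}
    best = max(r for r in reachable if r <= s // 2)
    return s - 2 * best

assert min_partition_diff([1, 6, 11, 5]) == 1  # {1, 5, 6} vs {11}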
| 24 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='ClapFeatureExtractor'
lowerCamelCase__ =('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__(self , a_ , a_ ):
'''simple docstring'''
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = kwargs.pop('''sampling_rate''' , a_ )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
__snake_case : Union[str, Any] = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if audios is not None:
__snake_case : Dict = self.feature_extractor(
a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
if text is not None and audios is not None:
__snake_case : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.tokenizer.model_input_names
__snake_case : str = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
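# A hedged usage sketch (checkpoint id assumed, not taken from this file): the
# __call__ above routes `text` to the tokenizer and `audios` plus
# `sampling_rate` to the feature extractor, merging both into one encoding.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.zeros(48_000, dtype=np.float32)  # one second of silence
inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_features, input_ids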
| 24 |
"""simple docstring"""
from collections.abc import Callable
def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float:
"""simple docstring"""
__snake_case : float = a
__snake_case : float = b
if function(_snake_case ) == 0: # one of the a or b is a root for the function
return a
elif function(_snake_case ) == 0:
return b
elif (
function(_snake_case ) * function(_snake_case ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
__snake_case : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # loop until the bracketing interval is narrower than 10^-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
__snake_case : List[str] = mid
else:
__snake_case : str = mid
__snake_case : str = start + (end - start) / 2.0
return mid
def lowercase ( _snake_case : float ) ->float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
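# A self-contained sketch of the same bisection idea under hypothetical names
# (`bisect_root`, with the cubic passed as a lambda): halve the bracketing
# interval while it is wider than 1e-7.
def bisect_root(fn, start: float, end: float) -> float:
    if fn(start) * fn(end) > 0:
        raise ValueError("could not find root in given interval.")
    while end - start > 1e-7:
        mid = (start + end) / 2.0
        if fn(start) * fn(mid) <= 0:
            end = mid  # sign change lies in the left half
        else:
            start = mid  # sign change lies in the right half
    return (start + end) / 2.0

print(bisect_root(lambda x: x**3 - 2 * x - 5, 1.0, 1000.0))  # ~2.0945515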
| 24 | 1 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Dict = process_name # process name
__snake_case : Dict = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__snake_case : Optional[Any] = arrival_time
__snake_case : Union[str, Any] = burst_time # remaining burst time
__snake_case : str = 0 # total time of the process wait in ready queue
__snake_case : Union[str, Any] = 0 # time from arrival time to completion time
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : List[str] = number_of_queues
# time slice of queues that round robin algorithm applied
__snake_case : Optional[int] = time_slices
# unfinished process is in this ready_queue
__snake_case : Dict = queue
# current time
__snake_case : Union[str, Any] = current_time
# finished process is in this sequence queue
__snake_case : deque[Process] = deque()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = []
for i in range(len(a_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i in range(len(a_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[str] = []
for i in range(len(a_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return [q.burst_time for q in queue]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : deque[Process] = deque() # sequence deque of finished process
while len(a_ ) != 0:
__snake_case : List[str] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time  # idle until the process arrives
# update waiting time of current process
self.update_waiting_time(a_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__snake_case : Dict = 0
# set the process's turnaround time because it is finished
__snake_case : Any = self.current_time - cp.arrival_time
# set the completion time
__snake_case : str = self.current_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(a_ ) ):
__snake_case : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time  # idle until the process arrives
# update waiting time of unfinished processes
self.update_waiting_time(a_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__snake_case : int = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(a_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__snake_case : Dict = 0
# set the finish time
__snake_case : str = self.current_time
# update the process' turnaround time because it is finished
__snake_case : int = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
__snake_case , __snake_case : int = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
SCREAMING_SNAKE_CASE : Any = Process("""P1""", 0, 53)
SCREAMING_SNAKE_CASE : List[Any] = Process("""P2""", 0, 17)
SCREAMING_SNAKE_CASE : Optional[int] = Process("""P3""", 0, 68)
SCREAMING_SNAKE_CASE : Optional[int] = Process("""P4""", 0, 24)
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Dict = [17, 25]
SCREAMING_SNAKE_CASE : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
SCREAMING_SNAKE_CASE : Any = Process("""P1""", 0, 53)
SCREAMING_SNAKE_CASE : int = Process("""P2""", 0, 17)
SCREAMING_SNAKE_CASE : str = Process("""P3""", 0, 68)
SCREAMING_SNAKE_CASE : Tuple = Process("""P4""", 0, 24)
SCREAMING_SNAKE_CASE : Union[str, Any] = 3
SCREAMING_SNAKE_CASE : str = [17, 25]
SCREAMING_SNAKE_CASE : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
SCREAMING_SNAKE_CASE : Dict = MLFQ(number_of_queues, time_slices, queue, 0)
SCREAMING_SNAKE_CASE : Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
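# A minimal standalone sketch (hypothetical name `round_robin_pass`) of the
# core step above: each burst gets at most `time_slice` of CPU per pass and
# rejoins the queue with its remaining burst if unfinished.
def round_robin_pass(bursts, time_slice):
    finished, remaining = [], deque()
    for _ in range(len(bursts)):
        burst = bursts.popleft()
        if burst > time_slice:
            remaining.append(burst - time_slice)  # preempted: requeue the remainder
        else:
            finished.append(burst)  # completed within this slice
    return finished, remaining

assert round_robin_pass(deque([53, 17, 68, 24]), 17) == ([17], deque([36, 51, 7]))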
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
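# A minimal standalone sketch (hypothetical name) of the backward-compat
# pattern used in __init__ above: accept the deprecated kwarg, warn, and fall
# back to it only when the new argument is absent.
def resolve_image_processor(image_processor=None, **kwargs):
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            "The `feature_extractor` argument is deprecated, use `image_processor` instead.",
            FutureWarning,
        )
        feature_extractor = kwargs.pop("feature_extractor")
    resolved = image_processor if image_processor is not None else feature_extractor
    if resolved is None:
        raise ValueError("You need to specify an `image_processor`.")
    return resolved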
| 24 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : str = line.split()
__snake_case : Union[str, Any] = line_number
__snake_case : Dict = words[0]
__snake_case : str = value
return result
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : str = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : List[str] = shape_pointer.shape
# let's reduce dimension
__snake_case : int = value[0]
else:
__snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case : List[Any] = getattr(_snake_case , _snake_case )
__snake_case : int = value
else:
__snake_case : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : List[str] = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : Tuple = '''.'''.join([key, hf_param_name] )
else:
__snake_case : Optional[int] = key
__snake_case : List[Any] = value if '''lm_head''' in full_key else value[0]
SCREAMING_SNAKE_CASE : Tuple = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict:
"""simple docstring"""
__snake_case : Tuple = False
for key, mapped_key in MAPPING.items():
__snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case : int = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Tuple = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[str] = '''weight_v'''
elif "bias" in name:
__snake_case : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[Any] = '''weight'''
else:
__snake_case : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
return is_used
def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Union[str, Any] = fairseq_model.state_dict()
__snake_case : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Optional[int] = int(items[0] )
__snake_case : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
__snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case )
else:
__snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[int] = read_txt_into_dict(_snake_case )
__snake_case : List[Any] = idalabel
__snake_case : int = WavaVecaForSequenceClassification(_snake_case )
__snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : int = target_dict.bos_index
__snake_case : Tuple = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : Any = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Dict = 0
__snake_case : List[Any] = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
__snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False
__snake_case : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
__snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = WavaVecaForCTC(_snake_case )
else:
__snake_case : Tuple = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' )
__snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case )
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
__snake_case : int = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
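# A minimal standalone sketch (hypothetical name `expand_mapped_key`) of the
# wildcard substitution performed in the mapping loop above: the layer index
# is recovered from the fairseq name and spliced into the `*` template.
def expand_mapped_key(mapped_key: str, fairseq_name: str, matched_key: str) -> str:
    layer_index = fairseq_name.split(matched_key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)

assert expand_mapped_key(
    "encoder.layers.*.attention.k_proj",
    "w2v_model.encoder.layers.3.self_attn.k_proj.weight",
    "self_attn.k_proj",
) == "encoder.layers.3.attention.k_proj"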
| 24 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
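# A hedged usage sketch (checkpoint name taken from the map above): the source
# text is wrapped with the [..., eos, src_lang_code] suffix installed by
# set_src_lang_special_tokens.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(tok.convert_ids_to_tokens(batch["input_ids"][0])[-2:])  # ['</s>', 'en_XX']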
| 24 | 1 |
"""simple docstring"""
from itertools import permutations
def lowercase ( _snake_case : tuple ) ->bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : str = [7, 11, 13, 17]
for i, test in enumerate(_snake_case ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase ( _snake_case : int = 10 ) ->int:
"""simple docstring"""
return sum(
int(''''''.join(map(_snake_case , _snake_case ) ) )
for num in permutations(range(_snake_case ) )
if is_substring_divisible(_snake_case ) )
if __name__ == "__main__":
print(F'{solution() = }')
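# A self-contained check of the substring-divisibility property tested above,
# using the digits of the canonical example 1406357289:
digits = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)
primes = (2, 3, 5, 7, 11, 13, 17)
assert all(
    (digits[i + 1] * 100 + digits[i + 2] * 10 + digits[i + 3]) % p == 0
    for i, p in enumerate(primes)
)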
| 24 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : List[Any] = hans_processors[task]()
__snake_case : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Tuple = label_list[2], label_list[1]
__snake_case : Dict = label_list
__snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
__snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case : Union[str, Any] = tf.data.Dataset.from_generator(
a_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
__snake_case : Tuple = '''%s-%s''' % (set_type, line[0])
__snake_case : Dict = line[5]
__snake_case : int = line[6]
__snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case : List[Any] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )}
__snake_case : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , )
__snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0
__snake_case : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
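# A minimal standalone sketch of the label remapping above: RoBERTa-style MNLI
# checkpoints order the labels differently, so indices 1 and 2 are swapped
# before the label -> id map is built.
label_list = ["contradiction", "entailment", "neutral"]
label_list[2], label_list[1] = label_list[1], label_list[2]
label_map = {label: i for i, label in enumerate(label_list)}
assert label_map == {"contradiction": 0, "neutral": 1, "entailment": 2}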
| 24 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='lilt'
def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=0 , a_="absolute" , a_=None , a_=4 , a_=10_24 , **a_ , ):
'''simple docstring'''
super().__init__(pad_token_id=a_ , **a_ )
__snake_case : str = vocab_size
__snake_case : str = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : str = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : str = type_vocab_size
__snake_case : int = initializer_range
__snake_case : int = layer_norm_eps
__snake_case : Optional[int] = position_embedding_type
__snake_case : Dict = classifier_dropout
__snake_case : Any = channel_shrink_ratio
__snake_case : Dict = max_ad_position_embeddings
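# A hedged construction sketch (parameter names assumed from the public
# LiltConfig API): the defaults mirror roberta-base plus the layout-specific
# fields.
from transformers import LiltConfig

config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)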
| 24 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ):
'''simple docstring'''
__snake_case : Any = vocab_size
__snake_case : str = max_position_embeddings
__snake_case : Any = d_model
__snake_case : List[str] = d_ff
__snake_case : Dict = d_ext
__snake_case : Optional[Any] = d_spout
__snake_case : int = num_switch_layers
__snake_case : List[Any] = num_ext_layers
__snake_case : Any = num_switch_layers + num_ext_layers
__snake_case : Optional[int] = num_heads
__snake_case : Tuple = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : Dict = dropout_rate
__snake_case : Optional[Any] = layer_norm_epsilon
__snake_case : Dict = router_bias
__snake_case : str = router_jitter_noise
__snake_case : List[str] = router_dtype
__snake_case : Union[str, Any] = router_ignore_padding_tokens
__snake_case : List[str] = output_hidden_states
__snake_case : Optional[Any] = output_attentions
__snake_case : Any = initializer_factor
__snake_case : int = output_router_logits
__snake_case : Union[str, Any] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
| 24 | 1 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def lowercase ( ) ->int:
"""simple docstring"""
__snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=_snake_case , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=_snake_case , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=_snake_case , help='''where to store parsed gold_data_path file''' , )
__snake_case : Any = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
__snake_case : Union[str, Any] = json.load(_snake_case )
for dpr_record in tqdm(_snake_case ):
__snake_case : List[Any] = dpr_record['''question''']
__snake_case : Dict = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(_snake_case ) + '''\n''' )
if __name__ == "__main__":
main()
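# A minimal sketch of the per-record transformation above, applied to an
# illustrative (hypothetical) DPR record:
dpr_record = {
    "question": "who wrote hamlet",
    "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}],
}
eval_line = dpr_record["question"] + "\n"
gold_line = "\t".join(ctx["title"] for ctx in dpr_record["positive_ctxs"]) + "\n"
assert gold_line == "Hamlet\tWilliam Shakespeare\n"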
| 24 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : str = line.split()
__snake_case : Union[str, Any] = line_number
__snake_case : Dict = words[0]
__snake_case : str = value
return result
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : str = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : List[str] = shape_pointer.shape
# let's reduce dimension
__snake_case : int = value[0]
else:
__snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case : List[Any] = getattr(_snake_case , _snake_case )
__snake_case : int = value
else:
__snake_case : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : List[str] = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : Tuple = '''.'''.join([key, hf_param_name] )
else:
__snake_case : Optional[int] = key
__snake_case : List[Any] = value if '''lm_head''' in full_key else value[0]
SCREAMING_SNAKE_CASE : Tuple = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict:
"""simple docstring"""
__snake_case : Tuple = False
for key, mapped_key in MAPPING.items():
__snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case : int = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Tuple = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[str] = '''weight_v'''
elif "bias" in name:
__snake_case : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[Any] = '''weight'''
else:
__snake_case : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
return is_used
def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Union[str, Any] = fairseq_model.state_dict()
__snake_case : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Optional[int] = int(items[0] )
__snake_case : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
__snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case )
else:
__snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[int] = read_txt_into_dict(_snake_case )
__snake_case : List[Any] = idalabel
__snake_case : int = WavaVecaForSequenceClassification(_snake_case )
__snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_snake_case )
            # Important: overwrite the bos & pad token ids, since the CTC blank
            # symbol is <pad> and not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : int = target_dict.bos_index
__snake_case : Tuple = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : Any = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Dict = 0
__snake_case : List[Any] = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
__snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False
__snake_case : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
__snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = WavaVecaForCTC(_snake_case )
else:
__snake_case : Tuple = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' )
__snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case )
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
__snake_case : int = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
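# Example invocation (a sketch; the script name and paths are hypothetical):
#   python convert_wav2vec2_checkpoint.py --checkpoint_path wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned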
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
SCREAMING_SNAKE_CASE : Any = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
SCREAMING_SNAKE_CASE : Tuple = BASE_URL + """/user"""
# https://github.com/settings/tokens
SCREAMING_SNAKE_CASE : Optional[Any] = os.environ.get("""USER_TOKEN""", """""")
def lowercase ( _snake_case : str ) ->dict[Any, Any]:
"""simple docstring"""
__snake_case : Dict = {
'''Authorization''': f"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_snake_case , headers=_snake_case ).json()
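# Note: requests.get above is called without an explicit timeout; production
# code would typically pass e.g. timeout=10 to avoid hanging indefinitely.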
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 24 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 24 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='canine'
def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1_63_84 , a_=16 , a_=0.02 , a_=1E-12 , a_=0 , a_=0XE_0_0_0 , a_=0XE_0_0_1 , a_=4 , a_=4 , a_=8 , a_=1_63_84 , a_=1_28 , **a_ , ):
'''simple docstring'''
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case : int = max_position_embeddings
__snake_case : Union[str, Any] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Dict = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : str = initializer_range
__snake_case : Tuple = type_vocab_size
__snake_case : Dict = layer_norm_eps
# Character config:
__snake_case : Dict = downsampling_rate
__snake_case : List[Any] = upsampling_kernel_size
__snake_case : Optional[Any] = num_hash_functions
__snake_case : Any = num_hash_buckets
__snake_case : Optional[int] = local_transformer_stride
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
"""simple docstring"""
def lowercase ( _snake_case : int ) ->int:
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__snake_case : Tuple = 0
while number:
        # This jumps straight to the next set bit instead of checking every
        # bit position, so the loop runs once per `1` bit rather than up to
        # 32 times.
number &= number - 1
count += 1
return count
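# Worked example (a sketch): 13 is 0b1101, and `number &= number - 1`
# successively yields 12 (0b1100), 8 (0b1000), 0 -- three iterations, so the
# function returns 3.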
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
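# A shared fixture: create_components builds a tiny linear model plus an AdamW
# optimizer, a OneCycleLR scheduler, and toy train/valid dataloaders for the
# tests below.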
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
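# The "signature" is simply the L1 norm of the model's parameters -- a cheap
# fingerprint used below to check that save_state/load_state round-trips the
# weights.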
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
        # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
        # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int ) ->str:
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
__snake_case : Any = len(bin(_snake_case )[3:] )
__snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
__snake_case : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''ZinengTang/tvlt-base'''
__snake_case : List[Any] = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE (self , **a_ ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **a_ )
def SCREAMING_SNAKE_CASE (self , **a_ ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.get_image_processor()
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : str = TvltProcessor(image_processor=a_ , feature_extractor=a_ )
processor.save_pretrained(self.tmpdirname )
__snake_case : Tuple = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , a_ )
self.assertIsInstance(processor.image_processor , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Optional[int] = TvltProcessor(image_processor=a_ , feature_extractor=a_ )
__snake_case : Dict = np.ones([1_20_00] )
__snake_case : Dict = feature_extractor(a_ , return_tensors='''np''' )
__snake_case : Optional[int] = processor(audio=a_ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.get_image_processor()
__snake_case : Optional[int] = self.get_feature_extractor()
__snake_case : Tuple = TvltProcessor(image_processor=a_ , feature_extractor=a_ )
__snake_case : str = np.ones([3, 2_24, 2_24] )
__snake_case : List[str] = image_processor(a_ , return_tensors='''np''' )
__snake_case : Optional[int] = processor(images=a_ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : int = self.get_feature_extractor()
__snake_case : List[Any] = TvltProcessor(image_processor=a_ , feature_extractor=a_ )
__snake_case : Union[str, Any] = np.ones([1_20_00] )
__snake_case : Optional[int] = np.ones([3, 2_24, 2_24] )
__snake_case : Union[str, Any] = processor(audio=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.get_image_processor()
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : int = TvltProcessor(image_processor=a_ , feature_extractor=a_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 24 |
"""simple docstring"""
def lowercase ( ) ->int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
for b in range(_snake_case , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
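# The only Pythagorean triplet with a + b + c = 1000 is (200, 375, 425),
# so solution() evaluates to 200 * 375 * 425 = 31875000.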
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def lowercase ( _snake_case : int = 1_500_000 ) ->int:
"""simple docstring"""
__snake_case : defaultdict = defaultdict(_snake_case )
__snake_case : Any = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , _snake_case , 2 ):
if gcd(_snake_case , _snake_case ) > 1:
continue
__snake_case : Tuple = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(_snake_case , limit + 1 , _snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'{solution() = }')
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : int = 100 ) ->int:
"""simple docstring"""
__snake_case : str = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
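# Worked example: for n = 10 the square of the sum is 3025 and the sum of the
# squares is 385, giving 2640; for the default n = 100 the result is 25164150.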
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
'''simple docstring'''
def __init__(self , a_=None , **a_ ):
'''simple docstring'''
super().__init__(features=a_ )
__snake_case : Dict = torch_tensor_kwargs
        import torch  # noqa: F401 -- imported here to fail fast if torch is unavailable at initialization
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
import torch
if isinstance(a_ , a_ ) and column:
if all(
isinstance(a_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(a_ )
return column
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
import torch
if isinstance(a_ , (str, bytes, type(a_ )) ):
return value
elif isinstance(a_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__snake_case : str = {}
if isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__snake_case : Dict = {'''dtype''': torch.intaa}
elif isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__snake_case : Tuple = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a_ , PIL.Image.Image ):
__snake_case : Any = np.asarray(a_ )
return torch.tensor(a_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(a_ , '''__array__''' ) and not isinstance(a_ , torch.Tensor ):
__snake_case : List[str] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a_ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] )
elif isinstance(a_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] )
return self._tensorize(a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , a_ , map_list=a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.numpy_arrow_extractor().extract_row(a_ )
__snake_case : Dict = self.python_features_decoder.decode_row(a_ )
return self.recursive_tensorize(a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.numpy_arrow_extractor().extract_column(a_ )
__snake_case : Dict = self.python_features_decoder.decode_column(a_ , pa_table.column_names[0] )
__snake_case : List[Any] = self.recursive_tensorize(a_ )
__snake_case : Dict = self._consolidate(a_ )
return column
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[str] = self.numpy_arrow_extractor().extract_batch(a_ )
__snake_case : Tuple = self.python_features_decoder.decode_batch(a_ )
__snake_case : str = self.recursive_tensorize(a_ )
for column_name in batch:
__snake_case : Union[str, Any] = self._consolidate(batch[column_name] )
return batch
| 24 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
        # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
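# Minimal usage sketch (standard `datasets` API; the paths are hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train-*.parquet"})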
| 24 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
def lowercase ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any ) ->Any:
"""simple docstring"""
__snake_case : Optional[Any] = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(_snake_case , '''r''' ) as f:
__snake_case : Optional[int] = f.readlines()
__snake_case : List[str] = f"""class {class_name}("""
__snake_case : List[str] = f"""{4 * ' '}def {test_name}("""
__snake_case : Any = f"""{8 * ' '}{correct_line.split()[0]}"""
__snake_case : int = f"""{16 * ' '}{correct_line.split()[0]}"""
__snake_case : Any = False
__snake_case : Optional[Any] = False
__snake_case : Any = False
__snake_case : Dict = False
__snake_case : Optional[Any] = 0
__snake_case : Dict = 0
__snake_case : Any = []
for line in lines:
if line.startswith(_snake_case ):
__snake_case : Dict = True
elif in_class and line.startswith(_snake_case ):
__snake_case : Union[str, Any] = True
elif in_class and in_func and (line.startswith(_snake_case ) or line.startswith(_snake_case )):
__snake_case : Any = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__snake_case : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__snake_case : List[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * ' '}{correct_line}""" )
__snake_case : str = False
else:
new_lines.append(_snake_case )
with open(_snake_case , '''w''' ) as f:
for line in new_lines:
f.write(_snake_case )
def lowercase ( _snake_case : Dict , _snake_case : Any=None ) ->List[Any]:
"""simple docstring"""
if fail is not None:
with open(_snake_case , '''r''' ) as f:
__snake_case : Union[str, Any] = {l.strip() for l in f.readlines()}
else:
__snake_case : List[Any] = None
with open(_snake_case , '''r''' ) as f:
__snake_case : int = f.readlines()
__snake_case : int = defaultdict(_snake_case )
for line in correct_lines:
__snake_case , __snake_case , __snake_case , __snake_case : Any = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
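# Expected format of --correct_filename (one entry per line, a sketch):
#   tests/test_foo.py;FooTest;test_bar;self.assertEqual(out, expected)
# i.e. "file;class_name;test_name;correct_line", matching the split(";") in main().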
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__snake_case : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = '''sshleifer/tiny-gpt2'''
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''sgugger/tiny-distilbert-classification'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , )
__snake_case : Optional[Any] = TensorFlowBenchmark(a_ )
__snake_case : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Any = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ )
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : Dict = TensorFlowBenchmark(a_ , [config] )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : int = TensorFlowBenchmark(a_ )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
__snake_case : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
__snake_case : Tuple = AutoConfig.from_pretrained(a_ )
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
__snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] )
__snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , )
__snake_case : Optional[int] = TensorFlowBenchmark(a_ )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(a_ )
benchmark.run()
self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(a_ ):
self.assertTrue(hasattr(a_ , '''sequential''' ) )
self.assertTrue(hasattr(a_ , '''cumulative''' ) )
self.assertTrue(hasattr(a_ , '''current''' ) )
self.assertTrue(hasattr(a_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , )
__snake_case : List[Any] = TensorFlowBenchmark(a_ )
__snake_case : Optional[int] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
| 24 | 1 |
"""simple docstring"""
from math import pi
def lowercase ( _snake_case : int , _snake_case : int ) ->float:
"""simple docstring"""
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 24 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def lowercase ( ) ->str:
"""simple docstring"""
global _logger
__snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = lock_file
return None
def __str__(self ):
'''simple docstring'''
__snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        """Change the default timeout value."""
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the file lock or fail with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Release the file lock once the lock counter reaches zero (or when forced)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
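# Illustrative nesting behaviour of the lock counter implemented above
# (a sketch; the lock-file name is an example):
#
#     lock = FileLock("resource.lock")
#     with lock:        # counter 0 -> 1, OS-level lock taken
#         with lock:    # counter 1 -> 2, no second OS call
#             ...
#         # counter 2 -> 1, OS-level lock still held
#     # counter 1 -> 0, OS-level lock released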
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
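# Minimal usage sketch for this module (lock-file name is illustrative):
#
#     lock = FileLock("my_resource.lock", timeout=1)
#     try:
#         with lock:
#             ...  # exclusive section
#     except Timeout:
#         print("another process holds the lock")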
| 24 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=16 , a_=13 , a_=7 , a_=14 , a_=10 , a_=19 , a_=5 , a_=4 , a_=True , a_=16 , a_=2 , a_=4 , a_=4 , a_="gelu" , a_=0.1 , a_=0.1 , a_=[1, 2, 3, 4, 5] , a_=25 , a_=5 , ):
'''simple docstring'''
__snake_case : List[str] = d_model
__snake_case : str = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : List[Any] = prediction_length
__snake_case : Tuple = context_length
__snake_case : Optional[int] = cardinality
__snake_case : Tuple = num_time_features
__snake_case : Union[str, Any] = lags_sequence
__snake_case : Tuple = embedding_dimension
__snake_case : int = is_training
__snake_case : List[str] = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = context_length
__snake_case : str = prediction_length + label_length
__snake_case : Union[str, Any] = label_length
__snake_case : Dict = moving_average
__snake_case : Tuple = autocorrelation_factor
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])

        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
return config, inputs_dict
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : int = AutoformerModel(config=a_ ).to(a_ ).eval()
__snake_case : Union[str, Any] = model(**a_ )
__snake_case : str = outputs.encoder_last_hidden_state
__snake_case : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = model.get_encoder()
encoder.save_pretrained(a_ )
__snake_case : Any = AutoformerEncoder.from_pretrained(a_ ).to(a_ )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = model.create_network_inputs(**a_ )
__snake_case , __snake_case : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__snake_case : int = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__snake_case : Optional[int] = encoder(inputs_embeds=a_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__snake_case : List[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__snake_case : int = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__snake_case : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__snake_case : List[str] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Dict = model.get_decoder()
decoder.save_pretrained(a_ )
__snake_case : List[str] = AutoformerDecoder.from_pretrained(a_ ).to(a_ )
__snake_case : Optional[int] = decoder(
trend=a_ , inputs_embeds=a_ , encoder_hidden_states=a_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
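# Illustrative sketch of the decomposition used by the checks above:
# Autoformer's decomposition_layer splits a series into a trend component
# (a moving average) and a seasonal remainder. Kernel size and tensor shapes
# below are assumptions for demonstration only:
#
#     x = torch.randn(1, 24, 8)  # (batch, time, features)
#     trend = torch.nn.functional.avg_pool1d(
#         x.transpose(1, 2), kernel_size=25, stride=1, padding=12
#     ).transpose(1, 2)
#     seasonal = x - trend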
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCamelCase__ =(AutoformerForPrediction,) if is_torch_available() else ()
lowerCamelCase__ ={'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = AutoformerModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a_ )
__snake_case , __snake_case : List[str] = model_class.from_pretrained(a_ , output_loading_info=a_ )
self.assertEqual(info['''missing_keys'''] , [] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a_ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = inspect.signature(getattr(a_ , '''forward''' ) )
# The main input is the name of the argument after `self`
__snake_case : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(a_ )
__snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : str = [*signature.parameters.keys()]
__snake_case : str = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(a_ )] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] = True
__snake_case : Optional[int] = getattr(self.model_tester , '''seq_length''' , a_ )
__snake_case : Optional[int] = getattr(self.model_tester , '''decoder_seq_length''' , a_ )
__snake_case : List[Any] = getattr(self.model_tester , '''encoder_seq_length''' , a_ )
__snake_case : Any = getattr(self.model_tester , '''d_model''' , a_ )
__snake_case : int = getattr(self.model_tester , '''num_attention_heads''' , a_ )
__snake_case : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
__snake_case : List[str] = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Any = True
__snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case : Union[str, Any] = outputs.encoder_attentions
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__snake_case : List[str] = len(a_ )
__snake_case : Optional[int] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(a_ , a_ )
# decoder attentions
__snake_case : Optional[Any] = outputs.decoder_attentions
self.assertIsInstance(a_ , (list, tuple) )
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__snake_case : Optional[int] = outputs.cross_attentions
self.assertIsInstance(a_ , (list, tuple) )
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__snake_case : Any = True
__snake_case : Optional[Any] = True
__snake_case : Dict = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : List[Any] = model(**self._prepare_for_class(a_ , a_ ) )
self.assertEqual(out_len + 2 , len(a_ ) )
__snake_case : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a cached batch of the tourism-monthly dataset for the integration tests."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(a_ )
__snake_case : Union[str, Any] = prepare_batch()
with torch.no_grad():
__snake_case : int = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__snake_case : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[Any] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=a_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , a_ , atol=a_ ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(a_ )
__snake_case : Tuple = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__snake_case : Any = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__snake_case : List[Any] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , a_ )
__snake_case : Union[str, Any] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=a_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , a_ , atol=a_ ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(a_ )
__snake_case : Optional[int] = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__snake_case : List[str] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__snake_case : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , a_ )
__snake_case : Optional[int] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=a_ )
__snake_case : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , a_ , rtol=1E-1 ) )
| 24 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Dict = seq_length
__snake_case : List[str] = is_training
__snake_case : List[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = scope
__snake_case : Any = range_bbox
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[str] = bbox[i, j, 3]
__snake_case : Any = bbox[i, j, 1]
__snake_case : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : List[str] = bbox[i, j, 2]
__snake_case : Union[str, Any] = bbox[i, j, 0]
__snake_case : Dict = t
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[str] = None
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ )
__snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ )
__snake_case : List[str] = model(a_ , bbox=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : List[str] = LiltForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
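# A vectorized equivalent of the bbox-legality loop in prepare_config_and_inputs
# above (an illustrative sketch): sorting each coordinate pair guarantees
# x0 <= x1 and y0 <= y1 without nested Python loops.
#
#     bbox[..., [0, 2]] = bbox[..., [0, 2]].sort(dim=-1).values
#     bbox[..., [1, 3]] = bbox[..., [1, 3]].sort(dim=-1).values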
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModelTester(self )
__snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Dict = type
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = LiltModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ )
__snake_case : Dict = torch.tensor([[1, 2]] , device=a_ )
__snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ )
__snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] )
__snake_case : str = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , )
self.assertTrue(outputs.last_hidden_state.shape , a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
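# Note on the bbox inputs above: each token carries an (x0, y0, x1, y1) box,
# and LayoutLM-family models conventionally expect coordinates normalized to
# the 0-1000 range. A hypothetical preprocessing helper:
#
#     def normalize_box(box, width, height):
#         return [
#             int(1000 * box[0] / width),
#             int(1000 * box[1] / height),
#             int(1000 * box[2] / width),
#             int(1000 * box[3] / height),
#         ]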
| 24 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
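# Sketch of the TorchScript round-trip exercised above (paths illustrative):
# tracing records the ops executed for the given example inputs, and the saved
# artifact can be reloaded and called without the original Python class.
#
#     traced = torch.jit.trace(model, (input_ids, attention_mask))
#     torch.jit.save(traced, "traced_model.pt")
#     loaded = torch.jit.load("traced_model.pt")
#     loaded(input_ids, attention_mask)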
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(a_ , attention_mask=a_ )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(a_ , attention_mask=a_ )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
| 24 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` by the Newton-Raphson method, starting from `starting_point`."""
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
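# Worked step of the update rule x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# implemented above, for f(x) = x**2 - 4 with x0 = 3 and m = 1:
#     x1 = 3 - (3**2 - 4) / (2 * 3) = 3 - 5/6 ~= 2.1667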
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 24 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """Convert a TF2 token-dropping BERT checkpoint to a PyTorch checkpoint."""

    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)
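    # Note on the transposes in the helpers above: TF Keras dense kernels are
    # stored as (in_features, out_features), while torch.nn.Linear.weight is
    # (out_features, in_features); e.g. a TF kernel of shape (768, 3072)
    # becomes a (3072, 768) PyTorch weight.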
print(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 1 |
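The helpers in the script above all follow one pattern: read a tensor from the TF2 object-based checkpoint by its ".ATTRIBUTES/VARIABLE_VALUE" path, transpose it if it is a dense kernel, and return it as a torch.Tensor. The transpose is needed because TensorFlow stores dense kernels as (in_features, out_features) while PyTorch's nn.Linear keeps weights as (out_features, in_features). A minimal sketch of that convention, using a randomly initialized kernel of hypothetical shape rather than a real checkpoint:

import numpy as np
import torch
from torch import nn

# Hypothetical TF-style dense kernel: (in_features=4, out_features=3)
tf_kernel = np.random.randn(4, 3).astype(np.float32)

linear = nn.Linear(in_features=4, out_features=3)
# PyTorch expects (out_features, in_features), hence the transpose
linear.weight.data = torch.from_numpy(tf_kernel.transpose())
assert linear.weight.shape == (3, 4)

Biases and layer-norm parameters keep their shapes as-is, which is why only variable names containing "kernel" are transposed.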
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =PandasConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Dict = data_files
if isinstance(a_ , a_ ):
__snake_case : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : Dict = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : Dict = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : Any = [dl_manager.iter_files(a_ ) for file in files]
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : Optional[int] = table_cast(a_ , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : Optional[int] = pa.Table.from_pandas(pd.read_pickle(a_ ) )
yield i, self._cast_table(a_ )
| 24 |
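The builder above is the machinery behind `load_dataset("pandas", ...)`: each data file is expected to hold a pickled `pandas.DataFrame`, which `_generate_tables` turns into an Arrow table via `pa.Table.from_pandas`. A usage sketch, assuming a local pickle file at the hypothetical path `train.pkl`:

import pandas as pd
from datasets import load_dataset

# Hypothetical input: a DataFrame pickled to disk
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")

# The "pandas" packaged builder reads the pickle back as an Arrow-backed dataset
ds = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
print(ds[0])  # {'text': 'a', 'label': 0}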
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ):
'''simple docstring'''
super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ )
__snake_case : Union[str, Any] = Sql(
cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = None
__snake_case : Dict = None
__snake_case : Dict = None
__snake_case : List[str] = None
self.builder.download_and_prepare(
download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , )
# Build dataset for splits
__snake_case : Any = self.builder.as_dataset(
split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__snake_case : List[str] = dataset
__snake_case : Tuple = name
__snake_case : Optional[int] = con
__snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case : Dict = num_proc
__snake_case : Dict = to_sql_kwargs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ )
__snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ )
__snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ )
__snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case : Optional[Any] = args
__snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__snake_case : Dict = query_table(
table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : Tuple = batch.to_pandas()
__snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ )
return num_rows or len(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 24 | 1 |
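`SqlDatasetReader` and `SqlDatasetWriter` back the `Dataset.from_sql` and `Dataset.to_sql` convenience methods. A round-trip sketch against a local SQLite database, assuming `sqlalchemy` is installed (string connection URIs are resolved through it) and using a hypothetical table name `my_table` and a throwaway `demo.db` file:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})

# Write the rows out in batches (SqlDatasetWriter under the hood)
ds.to_sql("my_table", "sqlite:///demo.db")

# Read the table back (SqlDatasetReader / the Sql packaged builder under the hood)
loaded = Dataset.from_sql("my_table", "sqlite:///demo.db")
print(loaded[0])

Batching matters here: `_write` slices the Arrow table with `query_table` and appends every chunk after the first with `if_exists="append"`, so memory stays bounded even for large datasets.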