Dataset columns:

code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
string (81 to 54k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1)
def solution(limit: int = 50_000_000) -> int:
    """
    Count the numbers below ``limit`` that can be written as the sum of a prime
    square, a prime cube and a prime fourth power (Project Euler problem 87).
    """
    ret = set()
    # 24 = 2**3 + 2**4 is the smallest possible cube + fourth-power contribution.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, then add 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Sort so the early-exit ``break`` statements below are valid.
    sorted_primes = sorted(primes)
    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Configuration for download and caching behaviour.

    Note: the attribute names were reconstructed from the field types and
    defaults; they match ``datasets.DownloadConfig``.
    """

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        # Deep-copy each attribute *value*; the obfuscated original deep-copied
        # the wrong variable here.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
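
A minimal usage sketch for the copy method (hedged: it assumes the reconstructed field names above are correct):

config = DownloadConfig(cache_dir="/tmp/hf_cache", proxies={"https": "127.0.0.1:3128"})
clone = config.copy()
assert clone == config  # dataclass equality compares field values
assert clone.proxies is not config.proxies  # mutable fields are deep-copied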
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # set a pad token so the test can run; any valid id works here
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: in VQA mode, calling without header text must fail
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the alpha channel is dropped, hence num_channels - 1)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
def hamming(n_element: int) -> list:
    """
    Return an ordered list of the first ``n_element`` Hamming numbers,
    i.e. numbers of the form 2^i * 3^j * 5^k.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput


def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []

    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))

    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, 15:] = 1  # second row
        fake_binary_mask[5, :10] = 1  # sixth row

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
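
A small sketch of driving the parser programmatically (added here; `--config_file` is the one flag defined above, so nothing beyond it is assumed):

parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
assert args.config_file == "my_config.yaml"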
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the full diamond, or a message for non-positive input
def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: List[str] , lowerCAmelCase: Dict , lowerCAmelCase: Dict )-> str:
if height >= 1:
move_tower(height - 1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
move_disk(lowerCAmelCase , lowerCAmelCase )
move_tower(height - 1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: int )-> str:
print('moving disk from' , lowerCAmelCase , 'to' , lowerCAmelCase )
def lowerCamelCase_ ( )-> Union[str, Any]:
_snake_case : Optional[int] = int(input('Height of hanoi: ' ).strip() )
move_tower(lowerCAmelCase , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
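# A small companion check (illustrative, not from the source): moving n
# disks takes exactly 2**n - 1 disk moves, which follows from the
# recurrence T(n) = 2 * T(n - 1) + 1 with T(0) = 0.
def _count_moves(height: int) -> int:
    return 0 if height < 1 else 2 * _count_moves(height - 1) + 1

assert _count_moves(3) == 7  # matches 2**3 - 1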
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: int = 3 , lowerCAmelCase: int = 7 , lowerCAmelCase: int = 1_00_00_00 )-> int:
_snake_case : int = 0
_snake_case : Optional[Any] = 1
for current_denominator in range(1 , limit + 1 ):
_snake_case : str = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_snake_case : List[Any] = current_numerator
_snake_case : Optional[int] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
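# A brute-force cross-check with the standard library's Fraction type
# (illustrative only; far too slow for the full 10**6 limit): enumerate
# denominators and keep the largest fraction strictly below 3/7.
from fractions import Fraction

def _closest_below(numerator: int = 3, denominator: int = 7, limit: int = 100) -> int:
    best = Fraction(0)
    for d in range(1, limit + 1):
        n = numerator * d // denominator
        if n * denominator == numerator * d:  # hit 3/7 exactly -> step one below
            n -= 1
        if best < Fraction(n, d) < Fraction(numerator, denominator):
            best = Fraction(n, d)
    return best.numerator

assert _closest_below(limit=8) == 2  # 2/5 is the best fraction below 3/7 for d <= 8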
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to, or run out of neighbors
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
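# A self-contained toy run of the same idea (illustrative; independent of
# the SearchProblem class above): anneal over the integers to minimise
# x**2, accepting worse neighbours with probability e ** (delta / T).
def _toy_anneal(start: int = 40, temp: float = 100.0, rate: float = 0.01) -> int:
    current = start
    while temp > 1.0:
        neighbor = current + random.choice((-1, 1))
        delta = current**2 - neighbor**2  # positive when the neighbour is better
        if delta > 0 or random.random() < math.e ** (delta / temp):
            current = neighbor
        temp -= temp * rate  # geometric cooling, as in the loop above
    return current

random.seed(0)
print(_toy_anneal())  # typically finishes within a few units of 0, the global minimum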
| 669 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar("""T""")
class _lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : T ):
'''simple docstring'''
_snake_case : Union[str, Any] = data
_snake_case : List[str] = self
_snake_case : str = 0
class _lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[int] ):
'''simple docstring'''
_snake_case : dict[T, DisjointSetTreeNode[T]] = {}
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : T ):
'''simple docstring'''
_snake_case : List[Any] = DisjointSetTreeNode(UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : T ):
'''simple docstring'''
_snake_case : str = self.map[data]
if elem_ref != elem_ref.parent:
_snake_case : List[Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : DisjointSetTreeNode[T] , UpperCamelCase : DisjointSetTreeNode[T] ):
'''simple docstring'''
if nodea.rank > nodea.rank:
_snake_case : List[str] = nodea
else:
_snake_case : Optional[int] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase_ ( self : Any , UpperCamelCase : T , UpperCamelCase : T ):
'''simple docstring'''
self.link(self.find_set(UpperCamelCase ) , self.find_set(UpperCamelCase ) )
class _lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[Any] ):
'''simple docstring'''
_snake_case : dict[T, dict[T, int]] = {}
def UpperCamelCase_ ( self : int , UpperCamelCase : T ):
'''simple docstring'''
if node not in self.connections:
_snake_case : List[str] = {}
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : T , UpperCamelCase : T , UpperCamelCase : int ):
'''simple docstring'''
self.add_node(UpperCamelCase )
self.add_node(UpperCamelCase )
_snake_case : Any = weight
_snake_case : Any = weight
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = []
_snake_case : Tuple = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda UpperCamelCase : UpperCamelCase[2] )
# creating the disjoint set
_snake_case : Dict = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(UpperCamelCase )
# MST generation
_snake_case : Any = 0
_snake_case : int = 0
_snake_case : Optional[Any] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
_snake_case , _snake_case , _snake_case : Any = edges[index]
index += 1
_snake_case : Union[str, Any] = disjoint_set.find_set(UpperCamelCase )
_snake_case : Dict = disjoint_set.find_set(UpperCamelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(UpperCamelCase , UpperCamelCase , UpperCamelCase )
disjoint_set.union(UpperCamelCase , UpperCamelCase )
return graph
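# A compact standalone Kruskal sketch over plain edge lists (illustrative;
# it mirrors the class above without the wrapper types). Union-find with
# path halving keeps each find near-constant time.
def _kruskal(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(u: int) -> int:
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:
            parent[ru] = rv  # union the two components
            mst.append((u, v, w))
    return mst

assert _kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 3, 7), (0, 2, 2)]) == [
    (2, 3, 1),
    (0, 2, 2),
    (0, 1, 3),
]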
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 669 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Any=3 , UpperCamelCase : str=32 , UpperCamelCase : int=3 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Tuple=[10, 20, 30, 40] , UpperCamelCase : str=[1, 1, 2, 1] , UpperCamelCase : Tuple=True , UpperCamelCase : Any=True , UpperCamelCase : int="relu" , UpperCamelCase : Optional[int]=3 , UpperCamelCase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case : str = parent
_snake_case : List[str] = batch_size
_snake_case : int = image_size
_snake_case : int = num_channels
_snake_case : Optional[Any] = embeddings_size
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[str] = depths
_snake_case : int = is_training
_snake_case : Tuple = use_labels
_snake_case : int = hidden_act
_snake_case : Optional[int] = num_labels
_snake_case : Optional[int] = scope
_snake_case : List[Any] = len(UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : int = None
if self.use_labels:
_snake_case : Any = ids_tensor([self.batch_size] , self.num_labels )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : Tuple = TFResNetModel(config=UpperCamelCase )
_snake_case : str = model(UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase_ ( self : Any , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.num_labels
_snake_case : Optional[Any] = TFResNetForImageClassification(UpperCamelCase )
_snake_case : List[Any] = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Tuple = config_and_inputs
_snake_case : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : str =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a_ : Union[str, Any] =(
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a_ : Optional[Any] =False
a_ : List[Any] =False
a_ : Any =False
a_ : str =False
a_ : List[str] =False
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = TFResNetModelTester(self )
_snake_case : List[str] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(UpperCamelCase )
_snake_case : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] ):
_snake_case : Union[str, Any] = model_class(UpperCamelCase )
_snake_case : str = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
_snake_case : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : int = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Union[str, Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case : int = layer_type
_snake_case : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Union[str, Any] = TFResNetModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase_ ( )-> Dict:
_snake_case : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case : str = self.default_image_processor
_snake_case : Union[str, Any] = prepare_img()
_snake_case : Tuple = image_processor(images=UpperCamelCase , return_tensors='tf' )
# forward pass
_snake_case : List[str] = model(**UpperCamelCase )
# verify the logits
_snake_case : Optional[int] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
_snake_case : List[Any] = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase , atol=1e-4 ) )
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
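# A cross-check against the closed form (illustrative): the loop above
# applies the recurrence C(k) = C(k - 1) * (4k - 2) / (k + 1), so the
# function returns the (n - 1)-th Catalan number, binom(2(n-1), n-1) / n.
from math import comb

def _catalan_closed_form(n: int) -> int:
    return comb(2 * (n - 1), n - 1) // n

assert [_catalan_closed_form(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]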
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: float )-> float:
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
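# A quick numerical check (figures approximate, for illustration): water at
# ~20 C has bulk modulus ~2.15e9 Pa and density ~998 kg/m^3, so the formula
# above gives c = (2.15e9 / 998) ** 0.5 ~= 1468 m/s, close to the commonly
# quoted ~1480 m/s for water.
assert abs((2.15e9 / 998) ** 0.5 - 1467.8) < 1.0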
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 1 |
from __future__ import annotations
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = text, pattern
_snake_case , _snake_case : Tuple = len(UpperCamelCase ), len(UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : str ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : int ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(self.textLen - self.patLen + 1 ):
_snake_case : Optional[int] = self.mismatch_in_text(UpperCamelCase )
if mismatch_index == -1:
positions.append(UpperCamelCase )
else:
_snake_case : str = self.match_in_pattern(self.text[mismatch_index] )
_snake_case : str = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCAmelCase_ = """ABAABA"""
lowerCAmelCase_ = """AB"""
lowerCAmelCase_ = BoyerMooreSearch(text, pattern)
lowerCAmelCase_ = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
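# A standalone naive cross-check (illustrative): brute-force scanning of
# "ABAABA" for "AB" yields the same match positions, [0, 3], that the
# bad-character heuristic above is meant to report.
_text, _pat = 'ABAABA', 'AB'
assert [
    i for i in range(len(_text) - len(_pat) + 1) if _text[i : i + len(_pat)] == _pat
] == [0, 3]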
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowerCAmelCase_ = logging.get_logger(__name__)
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Dict =[
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Union[str, Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_snake_case : Optional[Any] = deprecated_arg[3:]
setattr(self , UpperCamelCase , not kwargs.pop(UpperCamelCase ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
_snake_case : Union[str, Any] = kwargs.pop('torchscript' , self.torchscript )
_snake_case : Dict = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
_snake_case : Any = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**UpperCamelCase )
a_ : bool =field(default=UpperCAmelCase_ , metadata={"""help""": """Trace the models using torchscript"""} )
a_ : bool =field(default=UpperCAmelCase_ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
a_ : str =field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
_snake_case : List[str] = torch.device('cpu' )
_snake_case : Dict = 0
elif is_torch_tpu_available():
_snake_case : str = xm.xla_device()
_snake_case : Optional[Any] = 0
else:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_snake_case : Any = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.n_gpu > 0
| 669 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : int=None , UpperCamelCase : List[Any]=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = parent
_snake_case : Union[str, Any] = config_class
_snake_case : Optional[int] = has_text_modality
_snake_case : Optional[int] = kwargs
_snake_case : str = common_properties
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Any = self.config_class(**self.inputs_dict )
_snake_case : Any = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCamelCase ):
try:
setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase )
self.parent.assertEqual(
getattr(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCamelCase , UpperCamelCase )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCamelCase ):
try:
_snake_case : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCamelCase , UpperCamelCase )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : int = self.config_class(**self.inputs_dict )
_snake_case : List[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : str = os.path.join(UpperCamelCase , 'config.json' )
config_first.to_json_file(UpperCamelCase )
_snake_case : str = self.config_class.from_json_file(UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCamelCase )
_snake_case : Dict = self.config_class.from_pretrained(UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.config_class(**self.inputs_dict )
_snake_case : Optional[int] = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Optional[Any] = os.path.join(UpperCamelCase , UpperCamelCase )
config_first.save_pretrained(UpperCamelCase )
_snake_case : Optional[Any] = self.config_class.from_pretrained(UpperCamelCase , subfolder=UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_snake_case : List[str] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.config_class.is_composition:
return
_snake_case : Tuple = self.config_class()
self.parent.assertIsNotNone(UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = copy.deepcopy(UpperCamelCase )
_snake_case : int = self.config_class(**UpperCamelCase )
_snake_case : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(UpperCamelCase , UpperCamelCase ) != value:
wrong_values.append((key, getattr(UpperCamelCase , UpperCamelCase ), value) )
if len(UpperCamelCase ) > 0:
_snake_case : Union[str, Any] = '\n'.join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
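# A tiny standalone illustration of the Jaccard similarity that MinHash
# approximates (illustrative): the 0.85 threshold passed to
# make_duplicate_clusters above acts as a similarity cutoff of this kind.
def _jaccard(a: set, b: set) -> float:
    return len(a & b) / len(a | b)

assert _jaccard({'a', 'b', 'c'}, {'b', 'c', 'd'}) == 0.5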
| 669 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> list:
_snake_case : Tuple = []
_snake_case , _snake_case : List[Any] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
_snake_case : Optional[int] = result + left + right
return input_list
def lowerCamelCase_ ( lowerCAmelCase: list )-> list:
if len(lowerCAmelCase ) <= 1:
return input_list
_snake_case : List[str] = list(lowerCAmelCase )
# iteration for two-way merging
_snake_case : Optional[Any] = 2
while p <= len(lowerCAmelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowerCAmelCase ) , lowerCAmelCase ):
_snake_case : List[str] = i
_snake_case : int = i + p - 1
_snake_case : str = (low + high + 1) // 2
_snake_case : str = merge(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# final merge of last two parts
if p * 2 >= len(lowerCAmelCase ):
_snake_case : Optional[Any] = i
_snake_case : str = merge(lowerCAmelCase , 0 , lowerCAmelCase , len(lowerCAmelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowerCAmelCase_ = []
else:
lowerCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
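# A clean standalone bottom-up merge sort sketch (illustrative; the same
# idea as above, with run widths doubling 2, 4, 8, ... on every pass):
def _bottom_up_merge_sort(items: list) -> list:
    items = list(items)
    width = 1
    while width < len(items):
        for low in range(0, len(items), 2 * width):
            mid = min(low + width, len(items))
            high = min(low + 2 * width, len(items))
            left, right = items[low:mid], items[mid:high]
            merged = []
            while left and right:
                merged.append((left if left[0] <= right[0] else right).pop(0))
            items[low:high] = merged + left + right
        width *= 2
    return items

assert _bottom_up_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
assert _bottom_up_merge_sort([]) == []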
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be None.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase_ = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCAmelCase_ = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCAmelCase_ = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = 0.0
for i, j in zip(UpperCamelCase , UpperCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase , UpperCamelCase ) else 0.0
_snake_case : Dict = n_correct / len(UpperCamelCase )
return {
"accuracy": accuracy,
}
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 1 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
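# A minimal output sketch: pretty_print(3) draws a six-row diamond
# (trailing spaces omitted):
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *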
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
        lowerCAmelCase_ = int(input("""enter the number and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
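if __name__ == "__main__":
    # A minimal usage sketch, assuming the public ``transformers`` API
    # (``RobertaConfig`` is the upstream name of the config class above);
    # the argument values are illustrative.
    from transformers import RobertaConfig

    config = RobertaConfig(vocab_size=50_265, num_hidden_layers=6)
    print(config.hidden_size)  # 768 by default
    print(config.position_embedding_type)  # 'absolute' by default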
| 669 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Any=13 , UpperCamelCase : List[Any]=32 , UpperCamelCase : int=3 , UpperCamelCase : List[str]=4 , UpperCamelCase : Optional[Any]=[10, 20, 30, 40] , UpperCamelCase : Union[str, Any]=[2, 2, 3, 2] , UpperCamelCase : Tuple=True , UpperCamelCase : int=True , UpperCamelCase : Tuple=37 , UpperCamelCase : Union[str, Any]="gelu" , UpperCamelCase : str=10 , UpperCamelCase : Tuple=0.02 , UpperCamelCase : Tuple=["stage2", "stage3", "stage4"] , UpperCamelCase : Dict=3 , UpperCamelCase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case : Union[str, Any] = parent
_snake_case : Optional[int] = batch_size
_snake_case : Optional[int] = image_size
_snake_case : Dict = num_channels
_snake_case : str = num_stages
_snake_case : str = hidden_sizes
_snake_case : Optional[Any] = depths
_snake_case : List[Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : Dict = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : List[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[Any] = out_features
_snake_case : Optional[Any] = num_labels
_snake_case : int = scope
_snake_case : Union[str, Any] = num_stages
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : str = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = UperNetForSemanticSegmentation(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Any = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : Tuple = config_and_inputs
_snake_case : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] =(UperNetForSemanticSegmentation,) if is_torch_available() else ()
a_ : Union[str, Any] ={"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
a_ : int =False
a_ : List[str] =False
a_ : List[Any] =False
a_ : Any =False
a_ : Optional[int] =False
a_ : Dict =False
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = UperNetModelTester(self )
_snake_case : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(UpperCamelCase )
_snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : int ):
_snake_case : List[Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
_snake_case : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Any = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : List[Any] = _config_zero_init(UpperCamelCase )
_snake_case : int = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(config=UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase_ ( )-> Optional[Any]:
_snake_case : List[str] = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
_snake_case : int = Image.open(lowerCAmelCase ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Tuple = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
_snake_case : Tuple = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(UpperCamelCase )
_snake_case : int = prepare_img()
_snake_case : List[Any] = processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
with torch.no_grad():
_snake_case : Dict = model(**UpperCamelCase )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
_snake_case : Optional[Any] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[str] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
_snake_case : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(UpperCamelCase )
_snake_case : Optional[Any] = prepare_img()
_snake_case : Tuple = processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
with torch.no_grad():
_snake_case : Dict = model(**UpperCamelCase )
_snake_case : int = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
_snake_case : List[str] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
| 669 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
# Number of empty cell before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
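    # A minimal demo sketch using the functions above under their original
    # names (``construct_highway`` / ``simulate``); the parameter values are
    # illustrative: a 30-cell loop with a car every 4 cells starting at
    # speed 1, 5 update steps, 10% slowdown probability, max speed 5.
    demo_highway = construct_highway(30, 4, 1)
    for state in simulate(demo_highway, 5, 0.1, 5):
        print(''.join('.' if cell == -1 else str(cell) for cell in state))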
| 669 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : Dict , UpperCamelCase : int = 3 , UpperCamelCase : int = 3 , UpperCamelCase : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase : Tuple[int] = (64,) , UpperCamelCase : int = 1 , UpperCamelCase : str = "silu" , UpperCamelCase : int = 3 , UpperCamelCase : int = 32 , UpperCamelCase : int = 2_56 , UpperCamelCase : int = 32 , UpperCamelCase : Optional[int] = None , UpperCamelCase : float = 0.1_82_15 , UpperCamelCase : str = "group" , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
_snake_case : Dict = Encoder(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , down_block_types=UpperCamelCase , block_out_channels=UpperCamelCase , layers_per_block=UpperCamelCase , act_fn=UpperCamelCase , norm_num_groups=UpperCamelCase , double_z=UpperCamelCase , )
_snake_case : List[Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
_snake_case : str = nn.Convad(UpperCamelCase , UpperCamelCase , 1 )
_snake_case : Optional[int] = VectorQuantizer(UpperCamelCase , UpperCamelCase , beta=0.25 , remap=UpperCamelCase , sane_index_shape=UpperCamelCase )
_snake_case : Any = nn.Convad(UpperCamelCase , UpperCamelCase , 1 )
# pass init params to Decoder
_snake_case : List[Any] = Decoder(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , up_block_types=UpperCamelCase , block_out_channels=UpperCamelCase , layers_per_block=UpperCamelCase , act_fn=UpperCamelCase , norm_num_groups=UpperCamelCase , norm_type=UpperCamelCase , )
@apply_forward_hook
def UpperCamelCase_ ( self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : bool = True ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.encoder(UpperCamelCase )
_snake_case : List[str] = self.quant_conv(UpperCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCamelCase )
@apply_forward_hook
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : bool = False , UpperCamelCase : bool = True ):
'''simple docstring'''
if not force_not_quantize:
_snake_case , _snake_case , _snake_case : Optional[Any] = self.quantize(UpperCamelCase )
else:
_snake_case : Optional[Any] = h
_snake_case : List[str] = self.post_quant_conv(UpperCamelCase )
_snake_case : str = self.decoder(UpperCamelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : bool = True ):
'''simple docstring'''
_snake_case : List[str] = sample
_snake_case : Optional[int] = self.encode(UpperCamelCase ).latents
_snake_case : str = self.decode(UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
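if __name__ == "__main__":
    # A minimal round-trip sketch, assuming the upstream ``diffusers``
    # defaults (``VQModel`` is the original name of the class above);
    # the input shape is illustrative.
    model = VQModel()
    model.eval()
    with torch.no_grad():
        sample = torch.randn(1, 3, 32, 32)
        reconstruction = model(sample).sample
    print(reconstruction.shape)  # matches the input: torch.Size([1, 3, 32, 32])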
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
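if __name__ == "__main__":
    # A minimal usage sketch of candidate batching, assuming the public
    # ``transformers`` API (``RealmTokenizerFast`` is the upstream name of
    # the class above and ``batch_encode_candidates`` of its method); the
    # checkpoint and texts are illustrative.
    tokenizer = RealmTokenizerFast.from_pretrained('google/realm-cc-news-pretrained-encoder')
    batch = tokenizer.batch_encode_candidates(
        [['Hello world!', 'Nice to meet you!']], max_length=10, return_tensors='np'
    )
    print(batch['input_ids'].shape)  # (1, 2, 10): (batch, num_candidates, seq_len)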
| 669 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : int =AlbertTokenizer
a_ : str =AlbertTokenizerFast
a_ : List[Any] =True
a_ : Any =True
a_ : str =True
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : Optional[int] = AlbertTokenizer(UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Tuple = 'this is a test'
_snake_case : List[str] = 'this is a test'
return input_text, output_text
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Dict = '<pad>'
_snake_case : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCamelCase ) , 3_00_00 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Dict = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer()
_snake_case : Union[str, Any] = 'I was born in 92000, and this is falsé.'
_snake_case : Dict = tokenizer.tokenize(UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : str = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_snake_case : int = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Union[str, Any] = self.get_rust_tokenizer()
_snake_case : str = tokenizer.encode(UpperCamelCase )
_snake_case : List[Any] = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = AlbertTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
_snake_case : Optional[int] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [48, 25, 21, 12_89] )
_snake_case : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_snake_case : Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(UpperCamelCase , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
_snake_case : str = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = AlbertTokenizer(UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode('sequence builders' )
_snake_case : Dict = tokenizer.encode('multi-sequence build' )
_snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
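    # Example invocation sketch (the script name, tokenizer name and paths
    # below are illustrative assumptions):
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased \
    #       --dump_path ./fast_tokenizers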
| 669 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
lowerCAmelCase_ = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] =VOCAB_FILES_NAMES
a_ : int =PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] =PRETRAINED_INIT_CONFIGURATION
a_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[int] =BertTokenizer
def __init__( self : Any , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : str=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : int="[SEP]" , UpperCamelCase : Any="[PAD]" , UpperCamelCase : Tuple="[CLS]" , UpperCamelCase : Any="[MASK]" , UpperCamelCase : int=True , UpperCamelCase : int=None , **UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : Union[str, Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[Any] = do_lower_case
_snake_case : Tuple = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : int = normalizer_class(**UpperCamelCase )
_snake_case : List[Any] = do_lower_case
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : int=None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[str] = [self.sep_token_id]
_snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Dict = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
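if __name__ == "__main__":
    # A minimal usage sketch, assuming the public ``transformers`` API
    # (``BertTokenizerFast`` is the upstream name of the class above).
    tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
    encoded = tokenizer('Hello world!', 'How are you?')
    print(encoded['input_ids'])       # [CLS] segment A [SEP] segment B [SEP]
    print(encoded['token_type_ids'])  # 0 for the first segment, 1 for the second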
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
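    # A minimal round-trip sketch using the functions above (their original
    # names are ``base16_encode`` and ``base16_decode``):
    encoded = base16_encode(b'Hello World!')
    print(encoded)                 # 48656C6C6F20576F726C6421
    print(base16_decode(encoded))  # b'Hello World!'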
| 669 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase: list )-> float:
if not nums:
raise ValueError('List is empty' )
return sum(lowerCAmelCase ) / len(lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
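    # A minimal usage sketch (``average`` is the original name of the
    # function above):
    print(average([1, 2, 3, 4]))  # 2.5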
| 669 |
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets( screen_name: str )-> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
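# Minimal standalone sketch of the csv step used above (illustrative rows only,
# written to an in-memory buffer instead of a file):
import io
_buf = io.StringIO()
_writer = csv.writer(_buf )
_writer.writerow(['id', 'created_at', 'text'] )
_writer.writerows([['1', '2020-01-01', 'hello']] )
assert _buf.getvalue().splitlines()[0] == 'id,created_at,text'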
| 669 | 1 |
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] =["""torch""", """transformers""", """onnx"""]
def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : Optional[int] , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] =["""torch""", """transformers""", """onnx"""]
def __init__( self : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : str =["""torch""", """transformers""", """onnx"""]
def __init__( self : Union[str, Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] =["""torch""", """transformers""", """onnx"""]
def __init__( self : Tuple , *UpperCamelCase : str , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : Dict =["""torch""", """transformers""", """onnx"""]
def __init__( self : str , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple =["""torch""", """transformers""", """onnx"""]
def __init__( self : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : Any , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
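# A minimal sketch of what the repeated pattern above achieves: each dummy class
# can be imported freely, but touching it raises unless its backends exist.
# (requires_backends itself lives in ..utils; this re-creation is an assumption
# for illustration only.)
def _demo_requires_backends(obj , backends , available=() ):
    missing = [b for b in backends if b not in available]
    if missing:
        raise ImportError(f"""{type(obj ).__name__} requires {missing}""" )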
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
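# Why deepcopy(v) matters in the copy above: nested mutable fields must not be
# shared between the original and the copy. Quick standalone check with a plain
# dict standing in for the dataclass fields:
_original = {'download_desc': {'nested': True}}
_clone = {k: copy.deepcopy(v ) for k, v in _original.items()}
_clone['download_desc']['nested'] = False
assert _original['download_desc']['nested'] is True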
| 669 | 1 |
from collections.abc import Generator
def fibonacci_generator( )-> Generator[int, None, None]:
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( lowerCAmelCase: int = 10_00 )-> int:
    answer = 1
    fib_gen = fibonacci_generator()
    while len(str(next(fib_gen ) ) ) < lowerCAmelCase:
        answer += 1
    return answer + 1
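# Sanity check: F(12) = 144 is the first Fibonacci number with three digits,
# so solution(3) should return 12:
assert solution(3 ) == 12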
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["""gpt2"""]
TINY_MODEL_CHECKPOINT = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
    def __init__( self : List[str] , tokenizer : Dict ):
        '''simple docstring'''
        super().__init__()
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
        self.model = TFGPTaLMHeadModel.from_config(config )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
    def serving( self : Optional[int] , text : Optional[int] ):
        '''simple docstring'''
        tokenized = self.tokenizer(text )
        input_ids_dense = tokenized['input_ids'].to_tensor()
        input_mask = tf.cast(input_ids_dense > 0 , tf.intaa )
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
        return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 1 |
from __future__ import annotations
lowerCAmelCase_ = """Muhammad Umer Farooq"""
lowerCAmelCase_ = """MIT"""
lowerCAmelCase_ = """1.0.0"""
lowerCAmelCase_ = """Muhammad Umer Farooq"""
lowerCAmelCase_ = """[email protected]"""
lowerCAmelCase_ = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
super().__init__()
_snake_case : list[str] = []
_snake_case : Optional[int] = domain
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_snake_case : Tuple = parse.urljoin(self.domain , UpperCamelCase )
self.urls.append(UpperCamelCase )
def lowerCamelCase_ ( lowerCAmelCase: str )-> str:
return ".".join(get_sub_domain_name(lowerCAmelCase ).split('.' )[-2:] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> str:
return parse.urlparse(lowerCAmelCase ).netloc
def lowerCamelCase_ ( lowerCAmelCase: str = "https://github.com" )-> list[str]:
_snake_case : List[Any] = get_domain_name(lowerCAmelCase )
# Initialize the parser
_snake_case : Any = Parser(lowerCAmelCase )
try:
# Open URL
_snake_case : Union[str, Any] = requests.get(lowerCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_snake_case : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_snake_case : List[Any] = requests.get(lowerCAmelCase )
# Get the valid email.
_snake_case : List[Any] = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(lowerCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = emails_from_url("""https://github.com""")
print(F"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 669 |
def hamming( lowerCAmelCase: int )-> list:
    n_element = int(lowerCAmelCase )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase_ = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase_ ( lowerCAmelCase: Dict="no" , lowerCAmelCase: str = default_json_config_file , lowerCAmelCase: bool = False )-> Union[str, Any]:
_snake_case : str = Path(lowerCAmelCase )
path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_snake_case : Optional[int] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_snake_case : int = torch.cuda.device_count()
_snake_case : Dict = num_gpus
_snake_case : str = False
if num_gpus > 1:
_snake_case : Optional[int] = 'MULTI_GPU'
else:
_snake_case : Dict = 'NO'
elif is_xpu_available() and use_xpu:
_snake_case : Optional[int] = torch.xpu.device_count()
_snake_case : Dict = num_xpus
_snake_case : Optional[int] = False
if num_xpus > 1:
_snake_case : Dict = 'MULTI_XPU'
else:
_snake_case : Optional[Any] = 'NO'
elif is_npu_available():
_snake_case : List[Any] = torch.npu.device_count()
_snake_case : int = num_npus
_snake_case : int = False
if num_npus > 1:
_snake_case : Optional[int] = 'MULTI_NPU'
else:
_snake_case : Tuple = 'NO'
else:
_snake_case : Optional[Any] = 0
_snake_case : Tuple = True
_snake_case : List[Any] = 1
_snake_case : Tuple = 'NO'
_snake_case : List[str] = ClusterConfig(**lowerCAmelCase )
config.to_json_file(lowerCAmelCase )
return path
def lowerCamelCase_ ( parser: Optional[int] , parents: Dict )-> List[str]:
    parser = parser.add_parser('default' , parents=parents , help=lowerCAmelCase_ , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
return parser
def default_config_command( args: Optional[Any] )-> Dict:
    config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file , repo_path="shi-labs/oneformer_demo" )-> Any:
    with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=2_55 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'ignore_index' ) )
        self.assertTrue(hasattr(image_processing , 'class_info_file' ) )
        self.assertTrue(hasattr(image_processing , 'num_text' ) )
        self.assertTrue(hasattr(image_processing , 'repo_path' ) )
        self.assertTrue(hasattr(image_processing , 'metadata' ) )
        self.assertTrue(hasattr(image_processing , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ['semantic'] * len(image_inputs ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ['semantic'] * len(image_inputs ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs( self : Optional[Any] , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ['semantic'] * len(image_inputs ) , annotations , return_tensors='pt' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs['mask_labels']
            class_labels = inputs['class_labels']
            pixel_values = inputs['pixel_values']
            text_inputs = inputs['text_inputs']
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='pil' )
        common(is_instance_map=True , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        fature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = fature_extractor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = fature_extractor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) , list )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) , list )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
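# Standalone sketch of the shortest-edge resize rule the tester's
# get_expected_values re-implements (the parameter values here are assumptions):
def _demo_shortest_edge(w , h , shortest=8_00 ):
    if w < h:
        return int(shortest * h / w ), shortest
    if w > h:
        return shortest, int(shortest * w / h )
    return shortest, shortest
assert _demo_shortest_edge(4_00 , 8_00 ) == (16_00, 8_00)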
| 669 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : UNetaDModel
a_ : ScoreSdeVeScheduler
    def __init__( self : Any , unet: UNetaDModel , scheduler: ScoreSdeVeScheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : List[str] , batch_size: int = 1 , num_inference_steps: int = 20_00 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs: Any , ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
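# Hedged usage sketch (the checkpoint id is an assumption for illustration, and
# the pipeline class above is stored under the obfuscated name _lowerCAmelCase
# in this dump):
#     pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256')
#     image = pipe(num_inference_steps=20_00).images[0]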
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input( )-> Tuple:
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
return config
def config_command_parser( subparsers: str=None )-> Any:
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=lowerCAmelCase_ )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase_ )
parser.add_argument(
        '--config_file' , default=None , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command( args: Any )-> Any:
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
print(F"""accelerate configuration saved at {config_file}""" )
def main( )-> Dict:
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
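# End-to-end sketch of the command wired up above (shell usage, as comments):
#     accelerate config --config_file /tmp/accelerate_config.yaml
# runs the interactive prompts from get_user_input() and saves the answers via
# config.to_yaml_file(...); a .json suffix switches to to_json_file(...).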
| 669 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
    def __init__( self : Dict , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_snake_case : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self : Optional[int] , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , text_embeddings: Optional[torch.FloatTensor] = None , **kwargs: Union[str, Any] , ):
'''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = ['']
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device='cpu' , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.1_82_15 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors='pt' ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
if output_type == "pil":
_snake_case : Any = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=UpperCamelCase , nsfw_content_detected=UpperCamelCase )
| 669 |
# Function to print upper half of diamond (pyramid)
def floyd( n: int )-> None:
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd( n: int )-> None:
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )
def pretty_print( n: int )-> None:
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
def lowerCamelCase_ ( number: int )-> int:
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
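# Quick check against the Catalan sequence 1, 1, 2, 5, 14, 42, ... under the
# 1-indexed convention used above:
#     lowerCamelCase_(5) == 14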
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
    def __init__( self : List[Any] , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=10_24 , num_mel_bins=1_28 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
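# Hedged usage sketch (the class above is stored under the obfuscated name
# _lowerCAmelCase in this dump):
#     config = _lowerCAmelCase(num_mel_bins=1_28, max_length=10_24)
#     assert config.num_mel_bins == 1_28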
| 669 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
    with open(log, """r""") as f:
        for line in f:
            line = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    duration = F"""{line["duration"]:.4f}"""
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
lowerCAmelCase_ = """"""
lowerCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCAmelCase_ = """Too many failed tests, please see the full report in the Action results."""
lowerCAmelCase_ = len(err) + 10
lowerCAmelCase_ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCAmelCase_ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCAmelCase_ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCAmelCase_ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCAmelCase_ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCAmelCase_ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCAmelCase_ = row[0]
else:
lowerCAmelCase_ = """"""
lowerCAmelCase_ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
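# A minimal, self-contained sketch of the pipe-only `TableFormat` defined at
# the top of this script: with every rule line set to None, tabulate emits
# only a header row and data rows, which render cleanly inside a Slack code
# block. The sample row below is illustrative, not taken from real logs.
from tabulate import DataRow, TableFormat, tabulate

demo_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
print(tabulate([["tests/test_x.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=demo_format, stralign="right"))
# prints two pipe-delimited lines with no separator rule between them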
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # loop until we find a neighbor we can move to (or run out of neighbors)
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # Metropolis acceptance probability e^(change / current_temp)
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
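# A minimal, standalone sketch (hypothetical helper, not part of the search
# above) of the Metropolis acceptance rule used in the main loop: a worse
# neighbor is still accepted with probability e^(change / current_temp), so a
# high temperature explores freely while a low temperature behaves greedily.
def acceptance_probability(change: float, temperature: float) -> float:
    # improving moves are always taken; worse moves decay exponentially
    return 1.0 if change > 0 else math.e ** (change / temperature)

# a move that is worse by 1.0 is accepted ~90% of the time at T=10
# (e^-0.1 ≈ 0.905) but only ~37% of the time at T=1 (e^-1 ≈ 0.368)
assert round(acceptance_probability(-1.0, 10.0), 3) == 0.905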
| 669 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Any ="""char"""
a_ : Union[str, Any] ="""bpe"""
a_ : Optional[Any] ="""wp"""
lowerCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] =["""image_processor""", """char_tokenizer"""]
a_ : Tuple ="""ViTImageProcessor"""
a_ : Optional[int] ="""MgpstrTokenizer"""
def __init__( self : Optional[Any] , UpperCamelCase : Dict=None , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : List[Any] = kwargs.pop('feature_extractor' )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
_snake_case : Tuple = tokenizer
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
_snake_case : List[str] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : int , UpperCamelCase : Any=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Any=None , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_snake_case : Union[str, Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None:
_snake_case : Optional[Any] = self.char_tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case : Dict = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self : Any , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Any = sequences
_snake_case : int = char_preds.size(0 )
_snake_case , _snake_case : Optional[Any] = self._decode_helper(UpperCamelCase , 'char' )
_snake_case , _snake_case : List[Any] = self._decode_helper(UpperCamelCase , 'bpe' )
_snake_case , _snake_case : str = self._decode_helper(UpperCamelCase , 'wp' )
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = []
for i in range(UpperCamelCase ):
_snake_case : Optional[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
_snake_case : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_snake_case : List[str] = scores.index(max(UpperCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = final_strs
_snake_case : List[str] = final_scores
_snake_case : Tuple = char_strs
_snake_case : Tuple = bpe_strs
_snake_case : List[Any] = wp_strs
return out
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_snake_case : Optional[Any] = self.char_decode
_snake_case : Dict = 1
_snake_case : Optional[Any] = '[s]'
elif format == DecodeType.BPE:
_snake_case : Any = self.bpe_decode
_snake_case : int = 2
_snake_case : Union[str, Any] = '#'
elif format == DecodeType.WORDPIECE:
_snake_case : Optional[Any] = self.wp_decode
_snake_case : int = 1_02
_snake_case : str = '[SEP]'
else:
raise ValueError(f"""Format {format} is not supported.""" )
_snake_case , _snake_case : int = [], []
_snake_case : Optional[int] = pred_logits.size(0 )
_snake_case : List[str] = pred_logits.size(1 )
_snake_case , _snake_case : Dict = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase , sorted=UpperCamelCase )
_snake_case : str = preds_index.view(-1 , UpperCamelCase )[:, 1:]
_snake_case : Union[str, Any] = decoder(UpperCamelCase )
_snake_case , _snake_case : Optional[Any] = torch.nn.functional.softmax(UpperCamelCase , dim=2 ).max(dim=2 )
_snake_case : Any = preds_max_prob[:, 1:]
for index in range(UpperCamelCase ):
_snake_case : List[str] = preds_str[index].find(UpperCamelCase )
_snake_case : Tuple = preds_str[index][:pred_eos]
_snake_case : str = preds_index[index].cpu().tolist()
_snake_case : int = pred_index.index(UpperCamelCase ) if eos_token in pred_index else -1
_snake_case : Tuple = preds_max_prob[index][: pred_eos_index + 1]
_snake_case : int = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase )
conf_scores.append(UpperCamelCase )
return dec_strs, conf_scores
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : List[str] = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
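# A minimal sketch (toy numbers) of the selection rule in the decode routine
# above (batch_decode in the upstream MGP-STR processor): for every sample,
# keep whichever of the three heads (char / bpe / wordpiece) reported the
# highest confidence score.
demo_scores = [0.91, 0.72, 0.88]  # char, bpe, wp confidences
demo_strings = ['ticket', 'ticke t', 'ticket']
assert demo_strings[demo_scores.index(max(demo_scores))] == 'ticket'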
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
# Allow `positional_embedding` to omit the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
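# A minimal sketch (toy length) of the additive causal mask registered in
# __init__ above: -10000 strictly above the diagonal means each position may
# only attend to itself and earlier positions once the mask is added to the
# attention scores before the softmax.
demo_mask = torch.full([4, 4], -1_00_00.0)
demo_mask.triu_(1)  # zeros on and below the diagonal, -10000 strictly above
assert demo_mask[0, 1] == -1_00_00.0 and demo_mask[1, 0] == 0.0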
| 669 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : str=99 , UpperCamelCase : str=13 , UpperCamelCase : Optional[int]=7 , UpperCamelCase : Union[str, Any]=9 , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : Dict=32 , UpperCamelCase : Any=5 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=37 , UpperCamelCase : Dict=8 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=0.0_02 , UpperCamelCase : str=1 , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Dict=None , UpperCamelCase : Tuple=None , ):
'''simple docstring'''
_snake_case : Union[str, Any] = parent
_snake_case : Tuple = batch_size
_snake_case : Dict = encoder_seq_length
_snake_case : Dict = decoder_seq_length
# For common tests
_snake_case : Union[str, Any] = self.decoder_seq_length
_snake_case : Dict = is_training
_snake_case : str = use_attention_mask
_snake_case : Optional[Any] = use_labels
_snake_case : Optional[Any] = vocab_size
_snake_case : List[Any] = hidden_size
_snake_case : List[Any] = num_hidden_layers
_snake_case : str = num_attention_heads
_snake_case : Union[str, Any] = d_ff
_snake_case : List[Any] = relative_attention_num_buckets
_snake_case : int = dropout_rate
_snake_case : int = initializer_factor
_snake_case : Optional[int] = eos_token_id
_snake_case : Any = pad_token_id
_snake_case : str = decoder_start_token_id
_snake_case : Optional[int] = None
_snake_case : List[Any] = decoder_layers
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=None , UpperCamelCase : Dict=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , ):
'''simple docstring'''
if attention_mask is None:
_snake_case : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_snake_case : Optional[int] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_snake_case : Any = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase )
if decoder_head_mask is None:
_snake_case : int = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
if cross_attn_head_mask is None:
_snake_case : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# We need to clamp the input ids here to avoid having pad tokens in between:
# the position_ids are prepared (as in NllbMoe) such that all pad tokens get
# pos id = 2 and the rest fall in 2..seq_length, where seq_length here is
# seq_length - num_pad_tokens. When using past, there is no way of knowing
# whether the past input ids contained pad tokens, which yields an incorrect
# seq_length and, in turn, position_ids that are off by num_pad_tokens for
# the past input.
_snake_case : Tuple = input_ids.clamp(self.pad_token_id + 1 )
_snake_case : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
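# A tiny illustration (toy values) of the clamp above: with pad_token_id = 0,
# torch.tensor([[3, 0, 7]]).clamp(0 + 1) becomes tensor([[3, 1, 7]]), so no
# pad id can survive inside the sequence.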
_snake_case : Tuple = self.get_config()
_snake_case : int = config.num_attention_heads
_snake_case : Optional[int] = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, input_dict
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
_snake_case : Optional[Any] = UMTaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[int] = model(
input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , )
_snake_case : Dict = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
_snake_case : List[Any] = result.last_hidden_state
_snake_case : int = result.past_key_values
_snake_case : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , ):
'''simple docstring'''
_snake_case : str = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
# first forward pass
_snake_case : Optional[Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
_snake_case : Dict = model(UpperCamelCase )
_snake_case : Union[str, Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
_snake_case , _snake_case : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_snake_case : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case : Union[str, Any] = model(UpperCamelCase )['last_hidden_state']
_snake_case : Optional[Any] = model(UpperCamelCase , past_key_values=UpperCamelCase )['last_hidden_state']
# select random slice
_snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
_snake_case : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , ):
'''simple docstring'''
_snake_case : Optional[int] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval()
_snake_case : int = model(**UpperCamelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() )
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a_ : Union[str, Any] =(UMTaForConditionalGeneration,) if is_torch_available() else ()
a_ : List[str] =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a_ : Optional[Any] =True
a_ : Tuple =False
a_ : Dict =False
a_ : List[Any] =True
a_ : str =True
# The small UMT5 model needs higher percentages for CPU/MP tests
a_ : Optional[Any] =[0.8, 0.9]
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
_snake_case : Optional[int] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCamelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cannot do half precision' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Any = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
_snake_case : List[str] = config_and_inputs[0]
_snake_case : Tuple = UMTaForConditionalGeneration(UpperCamelCase ).eval()
model.to(UpperCamelCase )
_snake_case : List[str] = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
}
for attn_name, (name, mask) in zip(UpperCamelCase , head_masking.items() ):
_snake_case : Tuple = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_snake_case : int = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCamelCase )
_snake_case : str = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase , return_dict_in_generate=UpperCamelCase , **UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_snake_case : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCamelCase ).to(UpperCamelCase )
_snake_case : int = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCamelCase , legacy=UpperCamelCase )
_snake_case : List[Any] = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_snake_case : Optional[int] = tokenizer(UpperCamelCase , return_tensors='pt' , padding=UpperCamelCase ).input_ids
# fmt: off
_snake_case : List[Any] = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = model.generate(input_ids.to(UpperCamelCase ) )
_snake_case : List[str] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_snake_case : List[Any] = tokenizer.batch_decode(UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
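# A small standalone sketch (hypothetical name) of the recurrence applied by
# the function above: C(0) = 1 and C(i) = C(i - 1) * (4 * i - 2) // (i + 1),
# the Catalan numbers; for an input n the function returns C(n - 1).
def catalan_prefix(limit: int) -> list:
    values = [1]
    for i in range(1, limit):
        values.append(values[-1] * (4 * i - 2) // (i + 1))
    return values

assert catalan_prefix(6) == [1, 1, 2, 5, 14, 42]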
| 669 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , *UpperCamelCase : List[str] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
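# A hypothetical usage sketch: the class above corresponds to transformers'
# LxmertTokenizerFast, and the checkpoint id comes from the pretrained map at
# the top of this file.
from transformers import LxmertTokenizerFast

demo_tok = LxmertTokenizerFast.from_pretrained('unc-nlp/lxmert-base-uncased')
demo_enc = demo_tok('what color is the cat?', 'it is black')
# token_type_ids are 0 over '[CLS] question [SEP]' and 1 over 'answer [SEP]',
# mirroring the token-type logic above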
| 669 | 1 |
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: int )-> float:
_snake_case : Tuple = u
for i in range(1 , lowerCAmelCase ):
_snake_case : Optional[int] = temp * (u - i)
return temp
def lowerCamelCase_ ( )-> None:
_snake_case : Optional[Any] = int(input('enter the number of values: ' ) )
_snake_case : list[list[float]] = []
for _ in range(lowerCAmelCase ):
y.append([] )
for i in range(lowerCAmelCase ):
for j in range(lowerCAmelCase ):
y[i].append(lowerCAmelCase )
_snake_case : int = 0
print('enter the values of x (space-separated): ' )
_snake_case : Tuple = list(map(lowerCAmelCase , input().split() ) )
print('enter the corresponding values of y (one per line): ' )
for i in range(lowerCAmelCase ):
_snake_case : Dict = float(input() )
_snake_case : Tuple = int(input('enter the value to interpolate: ' ) )
_snake_case : int = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCAmelCase ):
for j in range(n - i ):
_snake_case : Tuple = y[j + 1][i - 1] - y[j][i - 1]
_snake_case : Union[str, Any] = y[0][0]
for i in range(1 , lowerCAmelCase ):
summ += (ucal(lowerCAmelCase , lowerCAmelCase ) * y[0][i]) / math.factorial(lowerCAmelCase )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
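# A compact, self-contained sketch (hypothetical names) of the same scheme
# without interactive input: P(u) = y0 + u*Δy0 + u(u - 1)/2! * Δ²y0 + ...,
# with u = (value - x0) / h for equally spaced x values.
def newton_forward(xs: list, ys: list, value: float) -> float:
    n = len(xs)
    table = [list(ys)]  # row 0 of the forward-difference table holds the y values
    for level in range(1, n):
        prev = table[-1]
        table.append([prev[j + 1] - prev[j] for j in range(n - level)])
    u = (value - xs[0]) / (xs[1] - xs[0])
    result, u_term = table[0][0], 1.0
    for level in range(1, n):
        u_term *= u - (level - 1)
        result += u_term * table[level][0] / math.factorial(level)
    return result

# the data below is exactly quadratic, so the interpolant is exact: 2.5**2 = 6.25
assert newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 2.5) == 6.25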
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree; "+ value" inserts value into the treap, '
'"- value" erases all nodes with that value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
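# A minimal, standalone sanity check (hypothetical name; attribute names are
# taken from the Node fields used in __repr__ above) of the two local
# invariants the treap maintains: child values respect BST order around the
# parent, and no child has a smaller priority than its parent, since merge
# makes the node with the smaller `prior` the root.
def check_local_invariants(node) -> bool:
    if node is None:
        return True
    for child in (node.left, node.right):
        if child is not None and child.prior < node.prior:
            return False  # heap order on priorities violated
    if node.left is not None and node.left.value > node.value:
        return False  # BST order violated on the left
    if node.right is not None and node.right.value < node.value:
        return False  # BST order violated on the right
    return check_local_invariants(node.left) and check_local_invariants(node.right)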
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] ="""roformer"""
def __init__( self : str , UpperCamelCase : Union[str, Any]=5_00_00 , UpperCamelCase : str=None , UpperCamelCase : Dict=7_68 , UpperCamelCase : str=12 , UpperCamelCase : Dict=12 , UpperCamelCase : str=30_72 , UpperCamelCase : int="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : int=15_36 , UpperCamelCase : str=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Optional[int]=1e-1_2 , UpperCamelCase : str=0 , UpperCamelCase : List[Any]=False , UpperCamelCase : Optional[int]=True , **UpperCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : int = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : int = intermediate_size
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : Optional[Any] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : str = rotary_value
_snake_case : str = use_cache
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : List[Any] = {0: 'batch', 1: 'sequence'}
_snake_case : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
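# A minimal sketch (class name assumes the standard transformers export) of
# the embedding_size fallback in __init__ above: when embedding_size is not
# given, it simply mirrors hidden_size.
from transformers import RoFormerConfig

demo_cfg = RoFormerConfig(hidden_size=7_68)
assert demo_cfg.embedding_size == 7_68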
| 669 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] ="""roberta-prelayernorm"""
def __init__( self : Union[str, Any] , UpperCamelCase : List[Any]=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : List[Any]=12 , UpperCamelCase : List[str]=30_72 , UpperCamelCase : Union[str, Any]="gelu" , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Dict=5_12 , UpperCamelCase : int=2 , UpperCamelCase : int=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Dict=0 , UpperCamelCase : int=2 , UpperCamelCase : Optional[Any]="absolute" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Union[str, Any] = vocab_size
_snake_case : List[Any] = hidden_size
_snake_case : List[Any] = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : Dict = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : int = type_vocab_size
_snake_case : Any = initializer_range
_snake_case : str = layer_norm_eps
_snake_case : List[str] = position_embedding_type
_snake_case : List[Any] = use_cache
_snake_case : Union[str, Any] = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
| 669 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be None.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
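# Hypothetical usage sketch (comments only; the CLIPImageProcessor +
# XLM-RoBERTa tokenizer pairing above matches AltCLIP-style processors, and
# the concrete names below are assumptions):
# processor = SomeProcessor(image_processor=clip_image_processor, tokenizer=xlmr_tokenizer)
# batch = processor(text=['a photo of a cat'], images=pil_image, return_tensors='pt')
# batch['input_ids'], batch['pixel_values']  # text and image features in one encoding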
| 669 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] =1
@register_to_config
def __init__( self : Dict , UpperCamelCase : int=20_00 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : List[str]=20 , UpperCamelCase : Dict=1e-3 ):
'''simple docstring'''
_snake_case : str = None
_snake_case : int = None
_snake_case : int = None
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase , device=UpperCamelCase )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_snake_case : Optional[Any] = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_snake_case : List[str] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_snake_case : Dict = std.flatten()
while len(std.shape ) < len(score.shape ):
_snake_case : Optional[Any] = std.unsqueeze(-1 )
_snake_case : Dict = -score / std
# compute
_snake_case : Dict = -1.0 / len(self.timesteps )
_snake_case : Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_snake_case : Dict = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_snake_case : List[str] = beta_t.unsqueeze(-1 )
_snake_case : List[Any] = -0.5 * beta_t * x
_snake_case : Union[str, Any] = torch.sqrt(UpperCamelCase )
_snake_case : List[str] = drift - diffusion**2 * score
_snake_case : int = x + drift * dt
# add noise
_snake_case : Any = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase , device=x.device , dtype=x.dtype )
_snake_case : Tuple = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
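# A standalone sketch of the Euler-Maruyama predictor update performed in the
# step method above (hypothetical helper, not part of the scheduler API; dt
# is negative in reverse time, so sqrt(-dt) is real):
def _euler_maruyama_step(x: torch.Tensor, drift: torch.Tensor, diffusion: torch.Tensor, dt: float) -> torch.Tensor:
    # deterministic drift, then the stochastic diffusion kick
    x_mean = x + drift * dt
    return x_mean + diffusion * math.sqrt(-dt) * torch.randn_like(x)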
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""trocr"""
a_ : List[Any] =["""past_key_values"""]
a_ : Any ={
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Union[str, Any] , UpperCamelCase : Dict=5_02_65 , UpperCamelCase : Optional[int]=10_24 , UpperCamelCase : Optional[int]=12 , UpperCamelCase : Dict=16 , UpperCamelCase : int=40_96 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : int=0.1 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Any=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : int=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : Optional[int]=2 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
_snake_case : Tuple = vocab_size
_snake_case : Optional[int] = d_model
_snake_case : Dict = decoder_layers
_snake_case : Union[str, Any] = decoder_attention_heads
_snake_case : Optional[Any] = decoder_ffn_dim
_snake_case : Optional[int] = activation_function
_snake_case : List[Any] = max_position_embeddings
_snake_case : Optional[Any] = dropout
_snake_case : Any = attention_dropout
_snake_case : Optional[int] = activation_dropout
_snake_case : Union[str, Any] = init_std
_snake_case : Optional[Any] = decoder_layerdrop
_snake_case : Any = use_cache
_snake_case : Optional[int] = scale_embedding
_snake_case : int = use_learned_position_embeddings
_snake_case : List[Any] = layernorm_embedding
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , **UpperCamelCase , )
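# Hypothetical usage sketch (comments only; `TrOCRConfig` is the upstream
# name for the config class defined above):
# config = TrOCRConfig(d_model=512, decoder_layers=6)
# config.hidden_size  # 512, resolved through the attribute_map alias above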
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
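# Resolution sketch (comments only, derived from the branch above): for the
# default task the inputs property yields
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'})])
# while 'multiple-choice' adds a 'choice' axis at position 1.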
| 669 | 1 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
    print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
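# Invariant sketch (comments only): split(root, v) sends every value <= v to
# the left result and the rest to the right; merge(left, right) assumes all
# of left's values precede right's and preserves the min-heap order on the
# random priorities. An insert is therefore split-at-value plus two merges,
# and an erase is two splits that isolate the value followed by one merge.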
| 669 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
            # Number of empty cells before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
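# Rule summary for the update step above (comments only): (1) accelerate by
# one up to max_speed, (2) brake to at most the free space in front of the
# car, (3) with the given probability, randomly slow down by one. The
# displacement itself happens in the simulation step below, which moves each
# car forward by its speed modulo the ring size.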
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = self.dummy_uncond_unet
_snake_case : Dict = DDIMScheduler()
_snake_case : Any = self.dummy_vq_model
_snake_case : Optional[Any] = LDMPipeline(unet=UpperCamelCase , vqvae=UpperCamelCase , scheduler=UpperCamelCase )
ldm.to(UpperCamelCase )
ldm.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Optional[Any] = ldm(generator=UpperCamelCase , num_inference_steps=2 , output_type='numpy' ).images
_snake_case : Dict = torch.manual_seed(0 )
_snake_case : Optional[int] = ldm(generator=UpperCamelCase , num_inference_steps=2 , output_type='numpy' , return_dict=UpperCamelCase )[0]
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
_snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : List[str] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_snake_case : Optional[int] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(UpperCamelCase )
ldm.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Any = torch.manual_seed(0 )
_snake_case : Optional[int] = ldm(generator=UpperCamelCase , num_inference_steps=5 , output_type='numpy' ).images
_snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_snake_case : str = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_snake_case : List[str] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
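# Hypothetical usage sketch (comments only; `batch_encode_candidates` is the
# upstream name of the candidate-batching call defined above, and the model
# name is an assumption):
# tok = RealmTokenizerFast.from_pretrained('google/realm-cc-news-pretrained-encoder')
# out = tok.batch_encode_candidates([['candidate 1', 'candidate 2']], max_length=10, return_tensors='np')
# out['input_ids'].shape  # (batch_size, num_candidates, max_length)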
| 669 | 1 |
lowerCAmelCase_ = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = """T5Config"""
def lowerCamelCase_ ( lowerCAmelCase: jnp.array , lowerCAmelCase: int , lowerCAmelCase: int )-> jnp.ndarray:
_snake_case : Dict = jnp.zeros_like(lowerCAmelCase )
_snake_case : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
_snake_case : Optional[Any] = shifted_input_ids.at[:, 0].set(lowerCAmelCase )
_snake_case : Tuple = jnp.where(shifted_input_ids == -1_00 , lowerCAmelCase , lowerCAmelCase )
return shifted_input_ids
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] ="""mt5"""
a_ : Dict =MTaConfig
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""mt5"""
a_ : Any =MTaConfig
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] ="""mt5"""
a_ : int =MTaConfig
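# Behaviour sketch for the shift helper above (comments only, hypothetical
# values): with input_ids [[a, b, c]] and decoder_start_token_id s it returns
# [[s, a, b]], and any -100 label positions are replaced by pad_token_id via
# the jnp.where call.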
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
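def _stdlib_base16_roundtrip(payload: bytes) -> bool:
    # Independent sketch of the same round-trip idea using the standard
    # library (hypothetical helper, not part of the original module):
    # bytes.hex() mirrors the encoder above, bytes.fromhex() the decoder.
    encoded = payload.hex().upper()
    return bytes.fromhex(encoded) == payload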
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: PreTrainedTokenizer , lowerCAmelCase: int , lowerCAmelCase: Optional[int] = None , )-> str:
_snake_case : Any = {}
if train_file is not None:
_snake_case : Tuple = [train_file]
if eval_file is not None:
_snake_case : Optional[Any] = [eval_file]
if test_file is not None:
_snake_case : Union[str, Any] = [test_file]
_snake_case : Any = datasets.load_dataset('csv' , data_files=lowerCAmelCase )
_snake_case : Dict = list(ds[list(files.keys() )[0]].features.keys() )
_snake_case : Dict = features_name.pop(lowerCAmelCase )
_snake_case : Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) )
_snake_case : Union[str, Any] = {label: i for i, label in enumerate(lowerCAmelCase )}
_snake_case : Tuple = tokenizer.model_input_names
_snake_case : Dict = {}
if len(lowerCAmelCase ) == 1:
for k in files.keys():
_snake_case : Tuple = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' ) , batched=lowerCAmelCase , )
elif len(lowerCAmelCase ) == 2:
for k in files.keys():
_snake_case : Dict = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' , ) , batched=lowerCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_snake_case : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
_snake_case : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_snake_case : Tuple = {k: v for k, v in ex.items() if k in input_names}
_snake_case : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_snake_case : Any = {k: v for k, v in ex.items() if k in input_names}
_snake_case : Any = labelaid[ex[label_name]]
yield (d, label)
_snake_case : Any = (
tf.data.Dataset.from_generator(
lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_snake_case : Union[str, Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_snake_case : Optional[int] = (
tf.data.Dataset.from_generator(
lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_snake_case : List[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_snake_case : List[str] = (
tf.data.Dataset.from_generator(
lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_snake_case : List[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =field(metadata={"""help""": """Which column contains the label"""} )
a_ : str =field(default=UpperCAmelCase_ , metadata={"""help""": """The path of the training file"""} )
a_ : Optional[str] =field(default=UpperCAmelCase_ , metadata={"""help""": """The path of the development file"""} )
a_ : Optional[str] =field(default=UpperCAmelCase_ , metadata={"""help""": """The path of the test file"""} )
a_ : int =field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ : bool =field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def lowerCamelCase_ ( )-> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_snake_case , _snake_case , _snake_case : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case , _snake_case , _snake_case , _snake_case : Tuple = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_snake_case : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCAmelCase ) , labelaid=lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_snake_case : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCAmelCase: EvalPrediction ) -> Dict:
_snake_case : str = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_snake_case : Dict = TFTrainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , compute_metrics=lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case : Any = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case : Dict = trainer.evaluate()
_snake_case : List[Any] = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowerCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
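# Hypothetical invocation sketch (comments only; the script filename is an
# assumption, the flags mirror the dataclass fields above):
# python run_tf_text_classification.py --model_name_or_path bert-base-uncased \
#   --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#   --max_seq_length 128 --output_dir ./out --do_train --do_eval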
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
UpperCAmelCase_ , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : GenericTensor ):
'''simple docstring'''
if self.framework == "tf":
_snake_case : int = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_snake_case : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase )
else:
raise ValueError('Unsupported framework' )
return masked_index
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : GenericTensor ):
'''simple docstring'''
_snake_case : int = self.get_masked_index(UpperCamelCase )
_snake_case : int = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : GenericTensor ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict=None , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
if return_tensors is None:
_snake_case : int = self.framework
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase )
self.ensure_exactly_one_mask_token(UpperCamelCase )
return model_inputs
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.model(**UpperCamelCase )
_snake_case : int = model_inputs['input_ids']
return model_outputs
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=5 , UpperCamelCase : Any=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
_snake_case : Optional[Any] = target_ids.shape[0]
_snake_case : Any = model_outputs['input_ids'][0]
_snake_case : Optional[int] = model_outputs['logits']
if self.framework == "tf":
_snake_case : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_snake_case : List[Any] = outputs.numpy()
_snake_case : Tuple = outputs[0, masked_index, :]
_snake_case : int = stable_softmax(UpperCamelCase , axis=-1 )
if target_ids is not None:
_snake_case : Optional[Any] = tf.gather_nd(tf.squeeze(UpperCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
_snake_case : Optional[int] = tf.expand_dims(UpperCamelCase , 0 )
_snake_case : List[str] = tf.math.top_k(UpperCamelCase , k=UpperCamelCase )
_snake_case , _snake_case : Dict = topk.values.numpy(), topk.indices.numpy()
else:
_snake_case : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_snake_case : Optional[int] = outputs[0, masked_index, :]
_snake_case : Any = logits.softmax(dim=-1 )
if target_ids is not None:
_snake_case : Optional[int] = probs[..., target_ids]
_snake_case , _snake_case : int = probs.topk(UpperCamelCase )
_snake_case : Any = []
_snake_case : Any = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_snake_case : List[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_snake_case : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_snake_case : Optional[int] = target_ids[p].tolist()
_snake_case : Tuple = p
# Filter padding out:
_snake_case : Union[str, Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_snake_case : Optional[int] = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
_snake_case : str = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(UpperCamelCase )
result.append(UpperCamelCase )
if single_mask:
return result[0]
return result
    def get_target_ids( self : int , targets : List[Any] , top_k : Any=None ):
        '''simple docstring'''
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['input_ids']
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        'We cannot replace it with anything meaningful, ignoring it' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('At least one target must be provided when passed.' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self : Any , top_k : Any=None , targets : Any=None ):
        '''simple docstring'''
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
        return {}, {}, postprocess_params
    def __call__( self : Tuple , inputs : Dict , *args : str , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
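# A minimal usage sketch (the checkpoint name below is an assumption, not part of
# this file): the pipeline above is what backs
#   fill = pipeline('fill-mask', model='distilroberta-base')
#   fill('Paris is the <mask> of France.', top_k=2)
# which returns a list of {'score', 'token', 'token_str', 'sequence'} dicts per mask.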
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
    def serving( self : Optional[int] , text : Optional[int] ):
        '''simple docstring'''
        tokenized = self.tokenizer(text )
        input_ids_dense = tokenized['input_ids'].to_tensor()
        input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
        return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                tensor_input = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(tensor_input , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class _lowerCAmelCase :
'''simple docstring'''
    def __init__( self : Optional[Any] , model : Optional[int]=None , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir' , None )
        self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )
    def __call__( self : List[str] , **kwargs : Tuple ):
        '''simple docstring'''
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( path : Union[str, Path] , provider : Optional[str]=None , sess_options : List[Any]=None ):
        '''simple docstring'''
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
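    # Note: only the CPU provider is assumed here; passing e.g.
    # provider='CUDAExecutionProvider' should also work when onnxruntime-gpu
    # is installed, since the string is forwarded straight to InferenceSession.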
    def _save_pretrained( self : Union[str, Any] , save_directory : Union[str, Path] , file_name : Optional[str] = None , **kwargs : str ):
        '''simple docstring'''
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self : Optional[int] , save_directory : Union[str, os.PathLike] , **kwargs : Union[str, Any] , ):
        '''simple docstring'''
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
@classmethod
    def _from_pretrained( cls : Tuple , model_id : Union[str, Path] , use_auth_token : Optional[Union[bool, str, None]] = None , revision : Optional[Union[str, None]] = None , force_download : bool = False , cache_dir : Optional[str] = None , file_name : Optional[str] = None , provider : Optional[str] = None , sess_options : Optional["ort.SessionOptions"] = None , **kwargs : List[Any] , ):
        '''simple docstring'''
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs['model_save_dir'] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs['model_save_dir'] = Path(model_cache_path ).parent
            kwargs['latest_model_name'] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
@classmethod
    def from_pretrained( cls : List[str] , model_id : Union[str, Path] , force_download : bool = True , use_auth_token : Optional[str] = None , cache_dir : Optional[str] = None , **model_kwargs : Any , ):
        '''simple docstring'''
        revision = None
        if len(str(model_id ).split('@' ) ) == 2:
            model_id , revision = model_id.split('@' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
| 669 |
def hamming( n_element: int )-> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
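# A quick sanity sketch: hamming(8) is expected to return
# [1, 2, 3, 4, 5, 6, 8, 9] -- 7 is skipped because it has a prime factor other
# than 2, 3 and 5.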
| 669 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase_ ( )-> Any:
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected )
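# Sanity check on the expectation above: the eight selected edges weigh
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the usual MST weight for this 9-node
# example graph. The sorted() comparison assumes kruskal() may return the
# [u, v, weight] triples in any order.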
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file: Any , repo_path: str="shi-labs/oneformer_demo" )-> Any:
    with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
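# The returned metadata is a flat dict, e.g. for the ADE20K-panoptic file roughly
# {'0': 'wall', '1': 'building', ..., 'thing_ids': [...], 'class_names': [...]}:
# class ids map to names, plus the two convenience lists appended above.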
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
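        # Why 21 and 45: binary_mask_to_rle emits alternating (1-indexed start,
        # run length) pairs over the row-major flattened mask. The first run of
        # ones starts at pixel 21 and spans 45 pixels (30 from row 0 plus 15 from
        # row 1); the second pair comes from row 5, hence len(rle) == 4.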
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) , list )
            self.assertEqual(
                el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) , list )
            self.assertEqual(
                el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
import numpy as np
class _lowerCAmelCase :
'''simple docstring'''
    def __init__( self : Dict ):
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self : int , cell : Dict ):
        '''simple docstring'''
        return self.position == cell.position
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
print(self.position )
class _lowerCAmelCase :
'''simple docstring'''
    def __init__( self : Any , world_size : str=(5, 5) ):
        '''simple docstring'''
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
print(self.w )
    def get_neigbours( self : Tuple , cell : Optional[Any] ):
        '''simple docstring'''
        neughbour_cord = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar( world: Gridworld , start: Cell , goal: Cell )-> list:
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
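# Note on the sketch above: h is the *squared* Euclidean distance to the goal and
# f = g + h drives the np.argmin pop from the open list. There is no tie-breaking
# or re-parenting, so this is a minimal A* illustration rather than an optimal
# grid planner.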
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
print(world.w)
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input( )-> Tuple:
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers: str=None )-> Any:
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command( args: Any )-> Any:
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )
def main( )-> Dict:
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 669 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : str =field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a_ : ClassVar[Features] =Features({"""audio""": Audio()} )
a_ : ClassVar[Features] =Features({"""labels""": ClassLabel} )
a_ : str ="audio"
a_ : str ="labels"
    def align_with_features( self : int , features : Tuple ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
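# A hypothetical usage sketch (the dataset name is an assumption): a dataset with
# audio and label columns can be cast through this template, e.g.
#   ds = load_dataset('superb', 'ks', split='train').prepare_for_task('audio-classification')
# which renames and casts columns to the {'audio', 'labels'} schema above.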
| 669 |
# Function to print upper half of diamond (pyramid)
def floyd( n: int )-> None:
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ): # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ): # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd( n: int )-> None:
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
            print(' ' , end='' )
def pretty_print( n: int )-> None:
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
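# Example: pretty_print(3) prints the upper pyramid (1, 2, 3 stars per row) and
# then the inverted pyramid (3, 2, 1), i.e. a six-row star diamond.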
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCAmelCase_ = True
except ImportError:
lowerCAmelCase_ = False
try:
from torch.hub import _get_torch_home
lowerCAmelCase_ = _get_torch_home()
except ImportError:
lowerCAmelCase_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
lowerCAmelCase_ = os.path.join(torch_cache_home, """transformers""")
lowerCAmelCase_ = """https://cdn.huggingface.co"""
lowerCAmelCase_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
lowerCAmelCase_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
lowerCAmelCase_ = os.path.join(PATH, """config.yaml""")
lowerCAmelCase_ = os.path.join(PATH, """attributes.txt""")
lowerCAmelCase_ = os.path.join(PATH, """objects.txt""")
lowerCAmelCase_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
lowerCAmelCase_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCAmelCase_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
lowerCAmelCase_ = """pytorch_model.bin"""
lowerCAmelCase_ = """config.yaml"""
def load_labels( objs: Union[str, Any]=OBJECTS , attrs: Optional[Any]=ATTRIBUTES )-> tuple:
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(',' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(',' )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint( ckp_url: List[str] )-> OrderedDict:
    r = OrderedDict()
    with open(ckp_url , 'rb' ) as f:
        ckp = pkl.load(f )['model']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
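# Intent sketch: Detectron-style .pkl checkpoints store numpy arrays under the
# 'model' key; converting every entry to torch.Tensor makes the resulting dict
# consumable by Module.load_state_dict().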
class _lowerCAmelCase :
'''simple docstring'''
a_ : List[Any] ={}
def __init__( self : Optional[Any] , UpperCamelCase : dict , UpperCamelCase : str = "root" , UpperCamelCase : Dict=0 ):
'''simple docstring'''
_snake_case : List[Any] = name
_snake_case : str = level
_snake_case : List[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_snake_case : Optional[int] = copy.deepcopy(UpperCamelCase )
_snake_case : List[str] = copy.deepcopy(UpperCamelCase )
if isinstance(UpperCamelCase , UpperCamelCase ):
_snake_case : str = Config(UpperCamelCase , name=UpperCamelCase , level=level + 1 )
_snake_case : str = v
setattr(self , UpperCamelCase , UpperCamelCase )
_snake_case : Any = d
def __repr__( self : Dict ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = val
_snake_case : List[str] = val
_snake_case : Optional[Any] = key.split('.' )
_snake_case : Dict = len(UpperCamelCase ) - 1
_snake_case : int = self._pointer
if len(UpperCamelCase ) > 1:
for i, l in enumerate(UpperCamelCase ):
if hasattr(self , UpperCamelCase ) and isinstance(getattr(self , UpperCamelCase ) , UpperCamelCase ):
setattr(getattr(self , UpperCamelCase ) , '.'.join(levels[i:] ) , UpperCamelCase )
if l == last_level:
_snake_case : Optional[Any] = val
else:
_snake_case : Any = pointer[l]
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return self._pointer
def UpperCamelCase_ ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
with open(f"""{file_name}""" , 'w' ) as stream:
dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
with open(f"""{file_name}""" , 'w' ) as stream:
json.dump(UpperCamelCase , UpperCamelCase )
@staticmethod
def UpperCamelCase_ ( UpperCamelCase : Tuple ):
'''simple docstring'''
with open(UpperCamelCase ) as stream:
_snake_case : Optional[Any] = load(UpperCamelCase , Loader=UpperCamelCase )
return data
def __str__( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = ' '
if self._name != "root":
_snake_case : Tuple = f"""{t * (self._level-1)}{self._name}:\n"""
else:
_snake_case : Union[str, Any] = ''
_snake_case : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCamelCase , UpperCamelCase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(UpperCamelCase ).__name__})\n"""
_snake_case : List[str] = level
return r[:-1]
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Dict = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
return cls(UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : Any , UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : int = kwargs.pop('cache_dir' , UpperCamelCase )
_snake_case : str = kwargs.pop('force_download' , UpperCamelCase )
_snake_case : Dict = kwargs.pop('resume_download' , UpperCamelCase )
_snake_case : List[Any] = kwargs.pop('proxies' , UpperCamelCase )
_snake_case : List[Any] = kwargs.pop('local_files_only' , UpperCamelCase )
if os.path.isdir(UpperCamelCase ):
_snake_case : Any = os.path.join(UpperCamelCase , UpperCamelCase )
elif os.path.isfile(UpperCamelCase ) or is_remote_url(UpperCamelCase ):
_snake_case : List[Any] = pretrained_model_name_or_path
else:
_snake_case : Dict = hf_bucket_url(UpperCamelCase , filename=UpperCamelCase , use_cdn=UpperCamelCase )
try:
# Load from URL or cache if already cached
_snake_case : Any = cached_path(
UpperCamelCase , cache_dir=UpperCamelCase , force_download=UpperCamelCase , proxies=UpperCamelCase , resume_download=UpperCamelCase , local_files_only=UpperCamelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_snake_case : Any = Config.load_yaml(UpperCamelCase )
except EnvironmentError:
_snake_case : Tuple = 'Can\'t load config for'
raise EnvironmentError(UpperCamelCase )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(UpperCamelCase ), kwargs
def compare( in_tensor: Any )-> None:
    out_tensor = torch.load('dump.pt' , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(na , nb , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception('tensors are all good' )
# Hugging face functions below
def lowerCamelCase_ ( lowerCAmelCase: List[Any] )-> Optional[int]:
_snake_case : Union[str, Any] = urlparse(lowerCAmelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: Optional[Any]=True )-> str:
_snake_case : Any = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_snake_case : Optional[Any] = '/' not in model_id
if legacy_format:
return F"""{endpoint}/{model_id}-{filename}"""
else:
return F"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: Dict , lowerCAmelCase: Any=None , lowerCAmelCase: str=0 , lowerCAmelCase: str=None , )-> Optional[int]:
_snake_case : int = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
ua += "; " + "; ".join('{}/{}'.format(lowerCAmelCase , lowerCAmelCase ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
ua += "; " + user_agent
_snake_case : str = {'user-agent': ua}
if resume_size > 0:
_snake_case : List[Any] = 'bytes=%d-' % (resume_size,)
_snake_case : List[Any] = requests.get(lowerCAmelCase , stream=lowerCAmelCase , proxies=lowerCAmelCase , headers=lowerCAmelCase )
if response.status_code == 4_16: # Range not satisfiable
return
_snake_case : Dict = response.headers.get('Content-Length' )
_snake_case : Any = resume_size + int(lowerCAmelCase ) if content_length is not None else None
_snake_case : Optional[Any] = tqdm(
unit='B' , unit_scale=lowerCAmelCase , total=lowerCAmelCase , initial=lowerCAmelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase ) )
temp_file.write(lowerCAmelCase )
progress.close()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: Any=False , lowerCAmelCase: Tuple=None , lowerCAmelCase: List[Any]=10 , lowerCAmelCase: Optional[int]=False , lowerCAmelCase: Optional[int]=None , lowerCAmelCase: Dict=False , )-> Optional[Any]:
if cache_dir is None:
_snake_case : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = str(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
_snake_case : str = None
if not local_files_only:
try:
_snake_case : List[str] = requests.head(lowerCAmelCase , allow_redirects=lowerCAmelCase , proxies=lowerCAmelCase , timeout=lowerCAmelCase )
if response.status_code == 2_00:
_snake_case : Tuple = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_snake_case : List[str] = url_to_filename(lowerCAmelCase , lowerCAmelCase )
# get cache path to put the file
_snake_case : List[str] = os.path.join(lowerCAmelCase , lowerCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase ):
return cache_path
else:
_snake_case : Dict = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(lowerCAmelCase ) > 0:
return os.path.join(lowerCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_snake_case : Optional[int] = cache_path + '.lock'
with FileLock(lowerCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_snake_case : Tuple = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase , 'a+b' ) as f:
yield f
_snake_case : Optional[int] = _resumable_file_manager
if os.path.exists(lowerCAmelCase ):
_snake_case : List[str] = os.stat(lowerCAmelCase ).st_size
else:
_snake_case : Optional[Any] = 0
else:
_snake_case : Union[str, Any] = partial(tempfile.NamedTemporaryFile , dir=lowerCAmelCase , delete=lowerCAmelCase )
_snake_case : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , lowerCAmelCase , temp_file.name , )
http_get(
lowerCAmelCase , lowerCAmelCase , proxies=lowerCAmelCase , resume_size=lowerCAmelCase , user_agent=lowerCAmelCase , )
os.replace(temp_file.name , lowerCAmelCase )
_snake_case : Optional[Any] = {'url': url, 'etag': etag}
_snake_case : Dict = cache_path + '.json'
with open(lowerCAmelCase , 'w' ) as meta_file:
json.dump(lowerCAmelCase , lowerCAmelCase )
return cache_path
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: Dict=None )-> Tuple:
_snake_case : Tuple = url.encode('utf-8' )
_snake_case : Optional[Any] = shaaaa(lowerCAmelCase )
_snake_case : Optional[int] = url_hash.hexdigest()
if etag:
_snake_case : int = etag.encode('utf-8' )
_snake_case : List[str] = shaaaa(lowerCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Tuple=None , lowerCAmelCase: Union[str, Any]=False , lowerCAmelCase: Optional[int]=None , lowerCAmelCase: str=False , lowerCAmelCase: List[Any]=None , lowerCAmelCase: Tuple=False , lowerCAmelCase: Dict=False , lowerCAmelCase: Optional[int]=False , )-> List[Any]:
if cache_dir is None:
_snake_case : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Tuple = str(lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = str(lowerCAmelCase )
if is_remote_url(lowerCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
_snake_case : List[Any] = get_from_cache(
lowerCAmelCase , cache_dir=lowerCAmelCase , force_download=lowerCAmelCase , proxies=lowerCAmelCase , resume_download=lowerCAmelCase , user_agent=lowerCAmelCase , local_files_only=lowerCAmelCase , )
elif os.path.exists(lowerCAmelCase ):
# File, and it exists.
_snake_case : List[str] = url_or_filename
elif urlparse(lowerCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(lowerCAmelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(lowerCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase ) and not tarfile.is_tarfile(lowerCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_snake_case , _snake_case : List[str] = os.path.split(lowerCAmelCase )
_snake_case : List[Any] = output_file.replace('.' , '-' ) + '-extracted'
_snake_case : List[Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ) and os.listdir(lowerCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_snake_case : Optional[Any] = output_path + '.lock'
with FileLock(lowerCAmelCase ):
shutil.rmtree(lowerCAmelCase , ignore_errors=lowerCAmelCase )
os.makedirs(lowerCAmelCase )
if is_zipfile(lowerCAmelCase ):
with ZipFile(lowerCAmelCase , 'r' ) as zip_file:
zip_file.extractall(lowerCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase ):
_snake_case : List[str] = tarfile.open(lowerCAmelCase )
tar_file.extractall(lowerCAmelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(lowerCAmelCase ) )
return output_path_extracted
return output_path
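# Hedged usage sketch of the resolver above: given a remote URL it returns a
# local cache path (downloading if needed); given an existing local file it
# returns the path unchanged; with extract_compressed_file set, .zip/.tar
# archives are unpacked into a sibling "<name>-extracted" directory.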
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: Optional[int]="," )-> List[Any]:
assert isinstance(lowerCAmelCase , lowerCAmelCase )
if os.path.isfile(lowerCAmelCase ):
with open(lowerCAmelCase ) as f:
_snake_case : List[str] = eval(f.read() )
else:
_snake_case : Optional[int] = requests.get(lowerCAmelCase )
try:
_snake_case : Optional[int] = req.json()
except Exception:
_snake_case : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_snake_case : List[str] = eval(lowerCAmelCase )
except Exception:
_snake_case : Tuple = data.split('\n' )
req.close()
return data
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
_snake_case : Tuple = requests.get(lowerCAmelCase )
_snake_case : str = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase_ ( lowerCAmelCase: str )-> List[Any]:
_snake_case : Union[str, Any] = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase )
with open(lowerCAmelCase , 'rb' ) as stream:
_snake_case : Dict = pkl.load(lowerCAmelCase )
_snake_case : Tuple = weights.pop('model' )
_snake_case : Any = {}
for k, v in model.items():
_snake_case : Any = torch.from_numpy(lowerCAmelCase )
if "running_var" in k:
_snake_case : List[str] = torch.tensor([0] )
_snake_case : List[str] = k.replace('running_var' , 'num_batches_tracked' )
_snake_case : Tuple = zero
return new
def lowerCamelCase_ ( )-> str:
print(F"""{os.path.abspath(os.path.join(lowerCAmelCase , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any]="RGB" )-> List[str]:
assert isinstance(lowerCAmelCase , lowerCAmelCase )
if os.path.isfile(lowerCAmelCase ):
_snake_case : Tuple = cva.imread(lowerCAmelCase )
else:
_snake_case : Any = get_image_from_url(lowerCAmelCase )
assert img is not None, F"""could not connect to: {im}"""
_snake_case : Union[str, Any] = cva.cvtColor(lowerCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_snake_case : Dict = img[:, :, ::-1]
return img
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: List[Any]=1 )-> List[str]:
return (images[i : i + batch] for i in range(0 , len(lowerCAmelCase ) , lowerCAmelCase ))
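# Hedged usage sketch of the batching generator above, with readable names:
# chunking five items into batches of two yields [0, 1], [2, 3], then [4].
_demo_items = list(range(5))
assert [_demo_items[i : i + 2] for i in range(0, len(_demo_items), 2)] == [
    [0, 1],
    [2, 3],
    [4],
]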
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase_ ( lowerCAmelCase: Union[dict, list, tuple, torch.Tensor] )-> List[Tuple[int, ...]]:
_snake_case : Dict = []
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCAmelCase ) )
elif isinstance(lowerCAmelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCAmelCase ) )
elif isinstance(lowerCAmelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Tuple[int, ...] )-> Tuple[int, ...]:
_snake_case : Any = []
for d in reversed(lowerCAmelCase ):
idx.append(flat_idx % d )
_snake_case : Optional[Any] = flat_idx // d
return tuple(reversed(lowerCAmelCase ) )
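# Hedged worked example of the decoding above, restated with readable names:
# for dims (2, 3), flat index 5 peels off 5 % 3 == 2 and then 1 % 2 == 1,
# giving the multi-index (1, 2), the last element of a 2x3 tensor.
def _demo_flat_idx_to_idx(flat_idx: int, dims: tuple) -> tuple:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx //= d
    return tuple(reversed(idx))

assert _demo_flat_idx_to_idx(5, (2, 3)) == (1, 2)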
@torch.jit.ignore
def lowerCamelCase_ ( lowerCAmelCase: Sequence[int] , lowerCAmelCase: Sequence[int] , lowerCAmelCase: Sequence[int] , lowerCAmelCase: Optional[Sequence[bool]] = None , lowerCAmelCase: Optional[Sequence[bool]] = None , )-> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCAmelCase: List[bool] ) -> None:
_snake_case : List[str] = True
for i in range(len(lowerCAmelCase ) ):
_snake_case : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
_snake_case : Tuple = l[reversed_idx]
if start_edges is None:
_snake_case : Union[str, Any] = [s == 0 for s in start]
reduce_edge_list(lowerCAmelCase )
if end_edges is None:
_snake_case : int = [e == (d - 1) for e, d in zip(lowerCAmelCase , lowerCAmelCase )]
reduce_edge_list(lowerCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCAmelCase ) == 0:
return [()]
elif len(lowerCAmelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
_snake_case : List[Tuple[slice, ...]] = []
_snake_case : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCAmelCase , lowerCAmelCase ):
if s == e:
path_list.append(slice(lowerCAmelCase , s + 1 ) )
else:
break
_snake_case : Tuple[slice, ...] = tuple(lowerCAmelCase )
_snake_case : Union[str, Any] = len(lowerCAmelCase )
# start == end, and we're done
if divergence_idx == len(lowerCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_snake_case : Tuple = start[divergence_idx]
return tuple(
path + (slice(lowerCAmelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_snake_case : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCAmelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_snake_case : Any = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCamelCase_ ( lowerCAmelCase: torch.Tensor , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> torch.Tensor:
_snake_case : int = t.shape[:no_batch_dims]
_snake_case : str = list(_flat_idx_to_idx(lowerCAmelCase , lowerCAmelCase ) )
# _get_minimal_slice_set is inclusive
_snake_case : List[str] = list(_flat_idx_to_idx(flat_end - 1 , lowerCAmelCase ) )
# Get an ordered list of slices to perform
_snake_case : Optional[Any] = _get_minimal_slice_set(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
_snake_case : str = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
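# Hedged worked example: for t of shape (2, 3, 8) with no_batch_dims=2,
# slicing flat batch positions 1..4 gathers elements (0,1), (0,2), (1,0) and
# (1,1), returning a tensor of shape (4, 8).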
def lowerCamelCase_ ( lowerCAmelCase: Callable , lowerCAmelCase: Dict[str, Any] , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: Any = None , lowerCAmelCase: bool = False , )-> Any:
if not (len(lowerCAmelCase ) > 0):
raise ValueError('Must provide at least one input' )
_snake_case : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCAmelCase )]
_snake_case : Optional[int] = tuple([max(lowerCAmelCase ) for s in zip(*lowerCAmelCase )] )
def _prep_inputs(lowerCAmelCase: torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_snake_case : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_snake_case : Dict = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
_snake_case : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_snake_case : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCAmelCase )
_snake_case : Tuple = None
if _out is not None:
_snake_case : List[Any] = tensor_tree_map(lambda lowerCAmelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
_snake_case : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_snake_case : List[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCAmelCase: torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_snake_case : Optional[int] = 0
_snake_case : str = prepped_outputs
for _ in range(lowerCAmelCase ):
# Chunk the input
if not low_mem:
_snake_case : List[Any] = _select_chunk
else:
_snake_case : Any = partial(
_chunk_slice , flat_start=lowerCAmelCase , flat_end=min(lowerCAmelCase , i + chunk_size ) , no_batch_dims=len(lowerCAmelCase ) , )
_snake_case : Dict[str, Any] = tensor_tree_map(lowerCAmelCase , lowerCAmelCase )
# Run the layer on the chunk
_snake_case : str = layer(**lowerCAmelCase )
# Allocate space for the output
if out is None:
_snake_case : Optional[int] = tensor_tree_map(lambda lowerCAmelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCAmelCase )
# Put the chunk in its pre-allocated space
if isinstance(lowerCAmelCase , lowerCAmelCase ):
def assign(lowerCAmelCase: dict , lowerCAmelCase: dict ) -> None:
for k, v in da.items():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
assign(lowerCAmelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_snake_case : str = da[k]
assign(lowerCAmelCase , lowerCAmelCase )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
for xa, xa in zip(lowerCAmelCase , lowerCAmelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_snake_case : Tuple = xa
elif isinstance(lowerCAmelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_snake_case : str = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
_snake_case : int = tensor_tree_map(lambda lowerCAmelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCAmelCase )
return out
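# Hedged sketch of the core idea above, stripped of the tensor-tree handling
# and with readable names: apply a function to fixed-size slices of the
# flattened batch dimension and concatenate the results.
def _demo_chunked_apply(fn, x: torch.Tensor, chunk: int) -> torch.Tensor:
    return torch.cat([fn(x[i : i + chunk]) for i in range(0, x.shape[0], chunk)])

# e.g. _demo_chunked_apply(lambda t: t * 2, torch.ones(10, 3), 4) equals
# torch.ones(10, 3) * 2, computed four rows at a time (then 4, then 2).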
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : str , UpperCamelCase : int = 5_12 , ):
'''simple docstring'''
_snake_case : Dict = max_chunk_size
_snake_case : Optional[int] = None
_snake_case : Optional[tuple] = None
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Callable , UpperCamelCase : tuple , UpperCamelCase : int ):
'''simple docstring'''
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_snake_case : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
_snake_case : List[Any] = [c for c in candidates if c > min_chunk_size]
_snake_case : int = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCamelCase : int ) -> bool:
try:
with torch.no_grad():
fn(*UpperCamelCase , chunk_size=UpperCamelCase )
return True
except RuntimeError:
return False
_snake_case : List[str] = 0
_snake_case : Any = len(UpperCamelCase ) - 1
while i > min_viable_chunk_size_index:
_snake_case : Optional[int] = test_chunk_size(candidates[i] )
if not viable:
_snake_case : Optional[int] = (min_viable_chunk_size_index + i) // 2
else:
_snake_case : Tuple = i
_snake_case : Any = (i + len(UpperCamelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Iterable , UpperCamelCase : Iterable ):
'''simple docstring'''
_snake_case : Optional[int] = True
for aa, aa in zip(UpperCamelCase , UpperCamelCase ):
assert type(UpperCamelCase ) == type(UpperCamelCase )
if isinstance(UpperCamelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
_snake_case : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )]
_snake_case : Tuple = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )]
consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase )
else:
consistent &= aa == aa
return consistent
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Callable , UpperCamelCase : tuple , UpperCamelCase : int , ):
'''simple docstring'''
_snake_case : int = True
_snake_case : tuple = tree_map(lambda UpperCamelCase : a.shape if isinstance(UpperCamelCase , torch.Tensor ) else a , UpperCamelCase , UpperCamelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(UpperCamelCase )
_snake_case : Tuple = self._compare_arg_caches(self.cached_arg_data , UpperCamelCase )
else:
# No cached data yet, so force a fresh tuning pass
_snake_case : Dict = False
if not consistent:
_snake_case : Optional[int] = self._determine_favorable_chunk_size(
UpperCamelCase , UpperCamelCase , UpperCamelCase , )
_snake_case : Any = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
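# Hedged sketch of the tuning loop above with readable names: assuming the
# predicate is monotone (if a chunk size fits in memory, all smaller ones
# do), binary-search for the largest viable candidate.
def _demo_largest_viable(candidates: list, viable) -> int:
    lo, hi = 0, len(candidates) - 1
    while hi > lo:
        if viable(candidates[hi]):
            lo = hi
            hi = (hi + len(candidates) - 1) // 2
        else:
            hi = (lo + hi) // 2
    return candidates[lo]

assert _demo_largest_viable([32, 64, 128, 256], lambda c: c <= 128) == 128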
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Any=False )-> Dict:
_snake_case : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_snake_case : str = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
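# Hedged worked example: for layer 0 the table above maps timm's
# "blocks.0.attn.proj.weight" to Hugging Face's
# "deit.encoder.layer.0.attention.output.dense.weight", and so on per layer.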
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any]=False )-> str:
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : Dict = ''
else:
_snake_case : List[Any] = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Optional[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_snake_case : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : List[Any] = in_proj_weight[
: config.hidden_size, :
]
_snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size]
_snake_case : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : Optional[int] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> Tuple:
_snake_case : Optional[int] = dct.pop(lowerCAmelCase )
_snake_case : Optional[int] = val
def lowerCamelCase_ ( )-> List[Any]:
_snake_case : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case : str = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: Optional[int] )-> Optional[int]:
_snake_case : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
_snake_case : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_snake_case : Optional[int] = 10_00
_snake_case : Union[str, Any] = 'huggingface/label-files'
_snake_case : List[Any] = 'imagenet-1k-id2label.json'
_snake_case : Optional[Any] = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_snake_case : Optional[int] = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_snake_case : Any = idalabel
_snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
_snake_case : Optional[Any] = int(deit_name[-6:-4] )
_snake_case : Union[str, Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
_snake_case : Dict = 1_92
_snake_case : Union[str, Any] = 7_68
_snake_case : Optional[int] = 12
_snake_case : Union[str, Any] = 3
elif deit_name[9:].startswith('small' ):
_snake_case : Union[str, Any] = 3_84
_snake_case : Optional[int] = 15_36
_snake_case : Optional[int] = 12
_snake_case : Tuple = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
_snake_case : int = 10_24
_snake_case : Union[str, Any] = 40_96
_snake_case : Dict = 24
_snake_case : List[str] = 16
# load original model from timm
_snake_case : Dict = timm.create_model(lowerCAmelCase , pretrained=lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case : Optional[int] = timm_model.state_dict()
_snake_case : Union[str, Any] = create_rename_keys(lowerCAmelCase , lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# load HuggingFace model
_snake_case : Dict = DeiTForImageClassificationWithTeacher(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
_snake_case : Union[str, Any] = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_snake_case : str = DeiTImageProcessor(size=lowerCAmelCase , crop_size=config.image_size )
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors='pt' )
_snake_case : int = encoding['pixel_values']
_snake_case : Union[str, Any] = model(lowerCAmelCase )
_snake_case : str = timm_model(lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
# Allow positional_embedding to omit the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 669 | 1 |
import math
from datetime import datetime, timedelta
def lowerCamelCase_ ( lowerCAmelCase: int )-> datetime:
_snake_case : Optional[Any] = year % 19
_snake_case : List[Any] = year % 4
_snake_case : Optional[Any] = year % 7
_snake_case : List[str] = math.floor(year / 1_00 )
_snake_case : Optional[int] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_snake_case : Union[str, Any] = leap_day_inhibits / 4
_snake_case : List[str] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_snake_case : Any = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_snake_case : List[str] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_snake_case : Any = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(lowerCAmelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(lowerCAmelCase , 4 , 18 )
else:
return datetime(lowerCAmelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
lowerCAmelCase_ = """will be""" if year > datetime.now().year else """was"""
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
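# Hedged worked example of the recurrence above with readable names: each
# step applies C(i) = C(i-1) * (4*i - 2) // (i + 1), so inputs 1..6 yield the
# Catalan numbers 1, 1, 2, 5, 14, 42.
def _demo_catalan(n: int) -> int:
    current = 1
    for i in range(1, n):
        current = current * (4 * i - 2) // (i + 1)
    return current

assert [_demo_catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]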
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> Optional[int]:
_snake_case : List[str] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() )
_snake_case : Any = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: List[str] )-> Optional[int]:
if metric == "rouge2":
_snake_case : List[str] = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_snake_case : Any = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_snake_case : Union[str, Any] = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
_snake_case : Dict = ModelCheckpoint(
dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=F"""val_{metric}""" , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Tuple )-> List[str]:
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , )
class _lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : int = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase )
@rank_zero_only
def UpperCamelCase_ ( self : str , UpperCamelCase : pl.Trainer , UpperCamelCase : pl.LightningModule , UpperCamelCase : str , UpperCamelCase : Dict=True ):
'''simple docstring'''
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_snake_case : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_snake_case : Tuple = Path(pl_module.hparams.output_dir )
if type_path == "test":
_snake_case : Tuple = od / 'test_results.txt'
_snake_case : Union[str, Any] = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_snake_case : Optional[Any] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
_snake_case : Optional[Any] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=UpperCamelCase )
generations_file.parent.mkdir(exist_ok=UpperCamelCase )
with open(UpperCamelCase , 'a+' ) as writer:
for key in sorted(UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
_snake_case : Any = metrics[key]
if isinstance(UpperCamelCase , torch.Tensor ):
_snake_case : Optional[int] = val.item()
_snake_case : Union[str, Any] = f"""{key}: {val:.6f}\n"""
writer.write(UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
_snake_case : Any = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(UpperCamelCase )
@rank_zero_only
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
try:
_snake_case : int = pl_module.model.model.num_parameters()
except AttributeError:
_snake_case : Dict = pl_module.model.num_parameters()
_snake_case : Optional[Any] = count_trainable_parameters(UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase_ ( self : str , UpperCamelCase : pl.Trainer , UpperCamelCase : pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase , UpperCamelCase , 'test' )
@rank_zero_only
def UpperCamelCase_ ( self : Dict , UpperCamelCase : pl.Trainer , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
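    # Hedged worked example: for a first sequence of 3 tokens and a second of
    # 2, the mask above is [0, 0, 0, 0, 0] covering "[CLS] seq1 [SEP]" and
    # [1, 1, 1] covering "seq2 [SEP]".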
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 1 |
import numpy as np
def lowerCamelCase_ ( lowerCAmelCase: np.ndarray , lowerCAmelCase: float )-> np.ndarray:
return np.where(vector > 0 , lowerCAmelCase , (alpha * (np.exp(lowerCAmelCase ) - 1)) )
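# Hedged sanity sketch of the ELU above, restated with readable names:
# positive inputs pass through unchanged, negatives become alpha * (e^x - 1).
_demo_v = np.array([-1.0, 0.0, 2.0])
assert np.allclose(
    np.where(_demo_v > 0, _demo_v, 1.0 * (np.exp(_demo_v) - 1)),
    [-0.63212056, 0.0, 2.0],
)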
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'Commands: +value to insert value into the treap, '
'-value to erase all nodes with that value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 1 |
import qiskit
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int )-> qiskit.result.counts.Counts:
_snake_case : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_snake_case : Union[str, Any] = qiskit.QuantumCircuit(lowerCAmelCase , lowerCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_snake_case : int = qiskit.execute(lowerCAmelCase , lowerCAmelCase , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
| 669 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
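# Hedged worked sketch of the windowed product above: reducing the digit
# string "234" multiplies 2 * 3 * 4 while carrying the running product as a
# string, so the result is "24".
assert reduce(lambda a, b: str(int(a) * int(b)), "234") == "24"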
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCAmelCase_ = random.Random()
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: str=1.0 , lowerCAmelCase: int=None , lowerCAmelCase: Union[str, Any]=None )-> Optional[int]:
if rng is None:
_snake_case : List[str] = global_rng
_snake_case : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
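# Hedged usage sketch: floats_list((2, 3)) above returns a 2x3 nested list of
# floats in [0, scale), drawn from the module-level RNG unless another rng is
# passed in.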
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any]=7 , UpperCamelCase : Tuple=4_00 , UpperCamelCase : int=20_00 , UpperCamelCase : List[str]=10 , UpperCamelCase : int=1_60 , UpperCamelCase : Optional[Any]=8 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : List[Any]=40_00 , UpperCamelCase : Any=False , UpperCamelCase : Dict=True , ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : Tuple = batch_size
_snake_case : List[Any] = min_seq_length
_snake_case : Dict = max_seq_length
_snake_case : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case : Optional[int] = padding_value
_snake_case : int = sampling_rate
_snake_case : Optional[int] = return_attention_mask
_snake_case : Union[str, Any] = do_normalize
_snake_case : int = feature_size
_snake_case : List[str] = chunk_length
_snake_case : List[Any] = hop_length
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[Any]=False , UpperCamelCase : List[str]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase : Tuple ):
return list(itertools.chain(*UpperCamelCase ) )
if equal_length:
_snake_case : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case : List[str] = [np.asarray(UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] =WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = WhisperFeatureExtractionTester(self )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Optional[int] = feat_extract_first.save_pretrained(UpperCamelCase )[0]
check_json_file_has_correct_format(UpperCamelCase )
_snake_case : Optional[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase )
_snake_case : Dict = feat_extract_first.to_dict()
_snake_case : Union[str, Any] = feat_extract_second.to_dict()
_snake_case : Optional[Any] = feat_extract_first.mel_filters
_snake_case : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : List[Any] = os.path.join(UpperCamelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(UpperCamelCase )
_snake_case : str = self.feature_extraction_class.from_json_file(UpperCamelCase )
_snake_case : int = feat_extract_first.to_dict()
_snake_case : Union[str, Any] = feat_extract_second.to_dict()
_snake_case : Optional[int] = feat_extract_first.mel_filters
_snake_case : Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_snake_case : Dict = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_snake_case : Optional[Any] = feature_extractor(UpperCamelCase , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_snake_case : Dict = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_snake_case : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
# Test batched
_snake_case : Tuple = feature_extractor(UpperCamelCase , return_tensors='np' ).input_features
_snake_case : List[str] = feature_extractor(UpperCamelCase , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase , UpperCamelCase ):
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_snake_case : Tuple = np.asarray(UpperCamelCase )
_snake_case : Tuple = feature_extractor(UpperCamelCase , return_tensors='np' ).input_features
_snake_case : Optional[int] = feature_extractor(UpperCamelCase , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase , UpperCamelCase ):
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
_snake_case : List[Any] = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs]
_snake_case : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_snake_case : Optional[Any] = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs_truncated]
_snake_case : Tuple = feature_extractor(UpperCamelCase , return_tensors='np' ).input_features
_snake_case : Any = feature_extractor(UpperCamelCase , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase , UpperCamelCase ):
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
import torch
_snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : Any = np.random.rand(1_00 , 32 ).astype(np.floataa )
_snake_case : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case : List[str] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_snake_case : str = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_snake_case : List[Any] = ds.sort('id' ).select(range(UpperCamelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        # fmt: off
        _snake_case : Optional[Any] = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
_snake_case : int = self._load_datasamples(1 )
_snake_case : int = WhisperFeatureExtractor()
_snake_case : Tuple = feature_extractor(UpperCamelCase , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCamelCase , atol=1e-4 ) )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : str = self._load_datasamples(1 )[0]
_snake_case : Optional[int] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
_snake_case : Tuple = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase ) - 1 ) < 1e-3 ) )
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
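# The first two files are near-duplicates ('a ' repeated 20 and 30 times) while the third
# is distinct, so MinHash clustering at a 0.85 threshold should group the first two.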
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
| 669 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase_ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCamelCase_ ( lowerCAmelCase: str )-> Dict:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
_snake_case : Optional[int] = list(s_dict.keys() )
for key in keys:
_snake_case : Union[str, Any] = R'.*/layers_(\d+)'
_snake_case : Union[str, Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , lowerCAmelCase )
_snake_case : Optional[Any] = R'(encoder|decoder)\/'
if re.match(lowerCAmelCase , lowerCAmelCase ):
_snake_case : int = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
_snake_case : Optional[Any] = re.sub(R'/mlp/' , R'/1/mlp/' , lowerCAmelCase )
_snake_case : Optional[Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , lowerCAmelCase )
elif groups[0] == "decoder":
_snake_case : Tuple = re.sub(R'/mlp/' , R'/2/mlp/' , lowerCAmelCase )
_snake_case : Union[str, Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_snake_case : Optional[int] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(F"""{key} -> {new_key}""" )
_snake_case : List[str] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_snake_case : Optional[int] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_snake_case : Any = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_snake_case : str = s_dict[key].shape[0]
_snake_case : Any = s_dict[key]
for idx in range(lowerCAmelCase ):
_snake_case : Tuple = expert_weihts[idx]
print(F"""{key} -> {key.replace('expert/' , 'nested fstring' )}""" )
s_dict.pop(lowerCAmelCase )
return s_dict
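# Illustrative effect of the three renaming stages above:
#   "encoder/layers_0/attention/key/kernel" -> "encoder/block/0/layer/0/SelfAttention/k/kernel"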
lowerCAmelCase_ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple )-> int:
    # Convert a google style config to the hugging face format
import regex as re
with open(lowerCAmelCase , 'r' ) as f:
_snake_case : Any = f.read()
_snake_case : Union[str, Any] = re.findall(R'(.*) = ([0-9.]*)' , lowerCAmelCase )
_snake_case : List[str] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_snake_case : Optional[Any] = float(lowerCAmelCase ) if '.' in value else int(lowerCAmelCase )
_snake_case : Any = re.findall(R'(.*activations) = \(\'(.*)\',\)' , lowerCAmelCase )[0]
_snake_case : Any = str(activation[1] )
_snake_case : int = num_experts
_snake_case : int = SwitchTransformersConfig(**lowerCAmelCase )
return config
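# The gin file is scanned for `NAME = value` pairs; any name listed in GIN_TO_CONFIG_MAPPING
# becomes the corresponding SwitchTransformersConfig constructor argument.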
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Optional[Any]=None , lowerCAmelCase: Optional[Any]="./" , lowerCAmelCase: int=8 )-> List[Any]:
# Initialise PyTorch model
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
_snake_case : int = checkpoints.load_tax_checkpoint(lowerCAmelCase )
if gin_file is not None:
_snake_case : Optional[int] = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
_snake_case : Optional[Any] = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
_snake_case : Dict = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
_snake_case : Optional[Any] = flax_params['target']
_snake_case : Any = flatten_dict(lowerCAmelCase , sep='/' )
_snake_case : Union[str, Any] = rename_keys(lowerCAmelCase )
_snake_case : Optional[Any] = unflatten_dict(lowerCAmelCase , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCAmelCase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
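# Stand-in for `requests.request`: the tests monkeypatch it in below so downloads never
# touch the network and always return the fixed CONTENT payload defined above.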
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
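# Shared helper for the archive-iteration tests below: each extracted member must be a
# 4-line JSONL file whose records carry exactly the columns col_1, col_2 and col_3.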
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ : bool =field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
a_ : int =field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCamelCase_ ( )-> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case , _snake_case , _snake_case : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case , _snake_case , _snake_case : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
_snake_case : int = import_module('tasks' )
try:
_snake_case : List[Any] = getattr(lowerCAmelCase , model_args.task_type )
_snake_case : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_snake_case : Optional[int] = token_classification_task.get_labels(data_args.labels )
_snake_case : Dict[int, str] = dict(enumerate(lowerCAmelCase ) )
_snake_case : Optional[Any] = len(lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , idalabel=lowerCAmelCase , labelaid={label: i for i, label in enumerate(lowerCAmelCase )} , cache_dir=model_args.cache_dir , )
_snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_snake_case : Tuple = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
_snake_case : List[Any] = (
TokenClassificationDataset(
token_classification_task=lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=lowerCAmelCase , labels=lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_snake_case : int = (
TokenClassificationDataset(
token_classification_task=lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=lowerCAmelCase , labels=lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(lowerCAmelCase: np.ndarray , lowerCAmelCase: np.ndarray ) -> Tuple[List[int], List[int]]:
_snake_case : Tuple = np.argmax(lowerCAmelCase , axis=2 )
_snake_case , _snake_case : Union[str, Any] = preds.shape
_snake_case : int = [[] for _ in range(lowerCAmelCase )]
_snake_case : Any = [[] for _ in range(lowerCAmelCase )]
for i in range(lowerCAmelCase ):
for j in range(lowerCAmelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
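    # Positions labelled with the loss ignore_index (-100 for nn.CrossEntropyLoss) come from
    # padding and subword continuation tokens and are dropped before seqeval computes metrics.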
def compute_metrics(lowerCAmelCase: EvalPrediction ) -> Dict:
_snake_case , _snake_case : Any = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(lowerCAmelCase , lowerCAmelCase ),
"precision": precision_score(lowerCAmelCase , lowerCAmelCase ),
"recall": recall_score(lowerCAmelCase , lowerCAmelCase ),
"f1": fa_score(lowerCAmelCase , lowerCAmelCase ),
}
# Data collator
_snake_case : List[str] = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_snake_case : Dict = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , compute_metrics=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case : Dict = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case : Tuple = trainer.evaluate()
_snake_case : List[Any] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , lowerCAmelCase , lowerCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(lowerCAmelCase )
# Predict
if training_args.do_predict:
_snake_case : Dict = TokenClassificationDataset(
token_classification_task=lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=lowerCAmelCase , labels=lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_snake_case , _snake_case , _snake_case : Union[str, Any] = trainer.predict(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = align_predictions(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , lowerCAmelCase , lowerCAmelCase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
_snake_case : Tuple = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return results
def lowerCamelCase_ ( lowerCAmelCase: Any )-> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
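# Both declared axes are dynamic (batch and sequence, plus a choice axis for
# multiple-choice heads), so a single exported ONNX graph accepts inputs of any shape.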
| 669 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_snake_case : Tuple = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_snake_case : Any = {'unk_token': '<unk>'}
_snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
_snake_case : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_snake_case : List[str] = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , **UpperCamelCase : Tuple ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , **UpperCamelCase : Any ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_snake_case : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
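    # Returns one random 30x400 RGB image; `np.moveaxis` converts the channels-first
    # array into the channels-last layout that PIL expects.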
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : Any = self.get_image_processor()
_snake_case : Tuple = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
_snake_case : Any = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : int = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_snake_case : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
_snake_case : Tuple = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[Any] = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : str = image_processor(UpperCamelCase , return_tensors='np' )
_snake_case : List[str] = processor(images=UpperCamelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Optional[Any] = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Optional[Any] = 'lower newer'
_snake_case : List[Any] = processor(text=UpperCamelCase )
_snake_case : Tuple = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Optional[Any] = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Union[str, Any] = 'lower newer'
_snake_case : Any = self.prepare_image_inputs()
_snake_case : Dict = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_image_processor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : List[str] = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Optional[int] = processor.batch_decode(UpperCamelCase )
_snake_case : Dict = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Dict = self.get_image_processor()
_snake_case : str = self.get_tokenizer()
_snake_case : Optional[Any] = CLIPProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Any = 'lower newer'
_snake_case : Tuple = self.prepare_image_inputs()
_snake_case : Tuple = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 669 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
    _snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any cars
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # The car is near the end of the highway: wrap around and keep counting from the start
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
            # Number of empty cells before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
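# The steps above are the Nagel-Schreckenberg update rules: accelerate by one up to the
# speed cap, brake to keep a safe gap to the car ahead, then slow down at random with
# the given `probability`.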
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : Dict ):
'''simple docstring'''
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
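# Illustrative candidate-batch usage (a sketch; the batching method above
# corresponds to `batch_encode_candidates` in the REALM docs, and the
# checkpoint name is taken from the pretrained map above):
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   tokenized = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")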
| 669 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase_ = 3
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
print('Generating primitive root of p' )
while True:
_snake_case : int = random.randrange(3 , lowerCAmelCase )
if pow(lowerCAmelCase , 2 , lowerCAmelCase ) == 1:
continue
if pow(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) == 1:
continue
return g
def lowerCamelCase_ ( lowerCAmelCase: int )-> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...' )
_snake_case : Optional[int] = rabin_miller.generate_large_prime(lowerCAmelCase ) # select large prime number.
_snake_case : Tuple = primitive_root(lowerCAmelCase ) # one primitive root on modulo p.
    _snake_case : List[str] = random.randrange(3 , lowerCAmelCase )  # the private key has to be greater than 2 for safety.
_snake_case : Any = cryptomath.find_mod_inverse(pow(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
_snake_case : int = (key_size, e_a, e_a, p)
_snake_case : Tuple = (key_size, d)
return public_key, private_key
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: int )-> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
_snake_case , _snake_case : Tuple = generate_key(lowerCAmelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def lowerCamelCase_ ( )-> None:
print('Making key files...' )
make_key_files('elgamal' , 20_48 )
print('Key files generation successful' )
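# A self-contained textbook ElGamal round-trip (an illustrative aside, not part
# of this module; note the module above stores e_2 = (g^d)^-1 rather than the
# h = g^d convention used here, and the toy parameters are for demo only):
def elgamal_demo() -> None:
    p, g, d = 4_67, 2, 1_53  # toy prime, generator and private exponent
    h = pow(g, d, p)  # public component h = g^d mod p
    m, k = 3_31, 1_97  # integer plaintext (< p) and ephemeral key
    c_a, c_b = pow(g, k, p), (m * pow(h, k, p)) % p  # ciphertext pair
    recovered = (c_b * pow(c_a, p - 1 - d, p)) % p  # multiply by (c_a^d)^-1 via Fermat
    assert recovered == m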
if __name__ == "__main__":
main()
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
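# Example invocation (illustrative; the script path and dump directory are
# hypothetical, and --tokenizer_name must match a key of TOKENIZER_CLASSES):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers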
| 669 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: str , lowerCAmelCase: Tuple=1E-12 )-> int:
_snake_case : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowerCAmelCase , axis=1 ) , a_min=lowerCAmelCase ) ).T
_snake_case : Tuple = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowerCAmelCase , axis=1 ) , a_min=lowerCAmelCase ) ).T
return jnp.matmul(lowerCAmelCase , norm_emb_a.T )
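# A self-contained sketch of the same row-wise cosine similarity, written out
# directly (an aside; the names are illustrative): normalise every row of both
# embedding matrices, then take the Gram matrix, so identical rows score ~1.0.
def cosine_similarity_sketch(emb_a, emb_b, eps=1e-12):
    emb_a = emb_a / jnp.clip(jnp.linalg.norm(emb_a, axis=1, keepdims=True), a_min=eps)
    emb_b = emb_b / jnp.clip(jnp.linalg.norm(emb_b, axis=1, keepdims=True), a_min=eps)
    return emb_a @ emb_b.T  # shape: (rows of emb_a, rows of emb_b)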
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
a_ : CLIPConfig
a_ : jnp.dtype =jnp.floataa
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config )
_snake_case : Optional[Any] = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase , dtype=self.dtype )
_snake_case : List[Any] = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
_snake_case : List[Any] = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
_snake_case : Optional[int] = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,) )
_snake_case : Union[str, Any] = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
def __call__( self : Tuple , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : int = self.vision_model(UpperCamelCase )[1]
_snake_case : Any = self.visual_projection(UpperCamelCase )
_snake_case : int = jax_cosine_distance(UpperCamelCase , self.special_care_embeds )
_snake_case : List[str] = jax_cosine_distance(UpperCamelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
_snake_case : List[str] = 0.0
_snake_case : int = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
_snake_case : int = jnp.round(UpperCamelCase , 3 )
_snake_case : Union[str, Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase )
# Use a lower threshold if an image has any special care concept
_snake_case : List[str] = is_special_care * 0.01
_snake_case : Union[str, Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
_snake_case : Optional[Any] = jnp.round(UpperCamelCase , 3 )
_snake_case : Union[str, Any] = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int =CLIPConfig
a_ : str ="""clip_input"""
a_ : Dict =FlaxStableDiffusionSafetyCheckerModule
def __init__( self : List[Any] , UpperCamelCase : CLIPConfig , UpperCamelCase : Optional[Tuple] = None , UpperCamelCase : int = 0 , UpperCamelCase : jnp.dtype = jnp.floataa , UpperCamelCase : bool = True , **UpperCamelCase : Optional[int] , ):
'''simple docstring'''
if input_shape is None:
_snake_case : Optional[int] = (1, 2_24, 2_24, 3)
_snake_case : int = self.module_class(config=UpperCamelCase , dtype=UpperCamelCase , **UpperCamelCase )
super().__init__(UpperCamelCase , UpperCamelCase , input_shape=UpperCamelCase , seed=UpperCamelCase , dtype=UpperCamelCase , _do_init=_do_init )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : jax.random.KeyArray , UpperCamelCase : Tuple , UpperCamelCase : FrozenDict = None ):
'''simple docstring'''
_snake_case : List[str] = jax.random.normal(UpperCamelCase , UpperCamelCase )
_snake_case , _snake_case : Tuple = jax.random.split(UpperCamelCase )
_snake_case : List[Any] = {'params': params_rng, 'dropout': dropout_rng}
_snake_case : Optional[Any] = self.module.init(UpperCamelCase , UpperCamelCase )['params']
return random_params
def __call__( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : dict = None , ):
'''simple docstring'''
_snake_case : Optional[Any] = jnp.transpose(UpperCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} , jnp.array(UpperCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
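# Quick illustrative round-trip (an aside): the standard library implements the
# same RFC 3548 base16 alphabet, so the functions above can be cross-checked
# against it.
def base16_stdlib_check() -> None:
    import base64
    assert base64.b16encode(b'Hello World!') == b'48656C6C6F20576F726C6421'
    assert base64.b16decode(b'48656C6C6F20576F726C6421') == b'Hello World!'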
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCAmelCase_ = """CompVis/stable-diffusion-v1-1"""
lowerCAmelCase_ = """CompVis/stable-diffusion-v1-2"""
lowerCAmelCase_ = """CompVis/stable-diffusion-v1-3"""
lowerCAmelCase_ = """CompVis/stable-diffusion-v1-4"""
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : AutoencoderKL , UpperCamelCase : CLIPTextModel , UpperCamelCase : CLIPTokenizer , UpperCamelCase : UNetaDConditionModel , UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase : StableDiffusionSafetyChecker , UpperCamelCase : CLIPImageProcessor , UpperCamelCase : bool = True , ):
'''simple docstring'''
        super().__init__()
_snake_case : List[str] = StableDiffusionPipeline.from_pretrained(UpperCamelCase )
_snake_case : Tuple = StableDiffusionPipeline.from_pretrained(UpperCamelCase )
_snake_case : Tuple = StableDiffusionPipeline.from_pretrained(UpperCamelCase )
_snake_case : List[str] = StableDiffusionPipeline(
vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , requires_safety_checker=UpperCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return {k: getattr(self , UpperCamelCase ) for k in self.config.keys() if not k.startswith('_' )}
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_snake_case : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase )
@torch.no_grad()
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , **UpperCamelCase : List[str] , ):
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , **UpperCamelCase : Tuple , ):
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , **UpperCamelCase : Any , ):
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , **UpperCamelCase : List[str] , ):
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
_snake_case : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(UpperCamelCase )
        # Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
_snake_case : Union[str, Any] = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
_snake_case : Union[str, Any] = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
_snake_case : Dict = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
_snake_case : List[str] = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
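# Illustrative loading sketch (hedged; the `custom_pipeline` identifier below is
# an assumption about how this community pipeline is registered):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe(prompt="an astronaut riding a horse", num_inference_steps=25).images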
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
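# Equivalent pagination sketch (an aside): tweepy.Cursor performs the max_id
# bookkeeping of the loop above automatically.
#   alltweets = list(
#       tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=2_00).items()
#   )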
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
from manim import *
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = Rectangle(height=0.5 , width=0.5 )
_snake_case : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : Tuple = Rectangle(height=0.25 , width=0.25 )
_snake_case : Dict = [mem.copy() for i in range(6 )]
_snake_case : Tuple = [mem.copy() for i in range(6 )]
_snake_case : str = VGroup(*UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : int = VGroup(*UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : List[Any] = VGroup(UpperCamelCase , UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : Tuple = Text('CPU' , font_size=24 )
_snake_case : Optional[Any] = Group(UpperCamelCase , UpperCamelCase ).arrange(UpperCamelCase , buff=0.5 , aligned_edge=UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase )
_snake_case : Any = [mem.copy() for i in range(4 )]
_snake_case : Union[str, Any] = VGroup(*UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : int = Text('GPU' , font_size=24 )
_snake_case : List[str] = Group(UpperCamelCase , UpperCamelCase ).arrange(UpperCamelCase , buff=0.5 , aligned_edge=UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase )
_snake_case : Optional[int] = [mem.copy() for i in range(6 )]
_snake_case : Optional[int] = VGroup(*UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : Union[str, Any] = Text('Model' , font_size=24 )
_snake_case : Union[str, Any] = Group(UpperCamelCase , UpperCamelCase ).arrange(UpperCamelCase , buff=0.5 , aligned_edge=UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase )
_snake_case : Optional[int] = []
_snake_case : Optional[int] = []
for i, rect in enumerate(UpperCamelCase ):
_snake_case : List[str] = fill.copy().set_fill(UpperCamelCase , opacity=0.8 )
target.move_to(UpperCamelCase )
model_arr.append(UpperCamelCase )
_snake_case : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCamelCase )
self.add(*UpperCamelCase , *UpperCamelCase )
_snake_case : List[str] = [meta_mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
_snake_case : Optional[int] = VGroup(*UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : Optional[int] = VGroup(*UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : Union[str, Any] = VGroup(UpperCamelCase , UpperCamelCase ).arrange(UpperCamelCase , buff=0 )
_snake_case : List[str] = Text('Disk' , font_size=24 )
_snake_case : Optional[Any] = Group(UpperCamelCase , UpperCamelCase ).arrange(UpperCamelCase , buff=0.5 , aligned_edge=UpperCamelCase )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCamelCase , UpperCamelCase )
_snake_case : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[int] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase , UpperCamelCase )
_snake_case : int = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase )
_snake_case : Any = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase ) )
_snake_case : List[Any] = Square(0.3 )
input.set_fill(UpperCamelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCamelCase , buff=0.5 )
self.play(Write(UpperCamelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCamelCase , buff=0.02 )
self.play(MoveToTarget(UpperCamelCase ) )
self.play(FadeOut(UpperCamelCase ) )
_snake_case : Union[str, Any] = Arrow(start=UpperCamelCase , end=UpperCamelCase , color=UpperCamelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCamelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_snake_case : str = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase , run_time=3 ) )
_snake_case : Any = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCamelCase ) , Circumscribe(model_arr[0] , color=UpperCamelCase , **UpperCamelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCamelCase , **UpperCamelCase ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase , **UpperCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_snake_case : Union[str, Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCamelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_snake_case : str = AnimationGroup(
FadeOut(UpperCamelCase , run_time=0.5 ) , MoveToTarget(UpperCamelCase , run_time=0.5 ) , FadeIn(UpperCamelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCamelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_snake_case : Dict = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCamelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCamelCase , **UpperCamelCase ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase , **UpperCamelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCamelCase , **UpperCamelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCamelCase , **UpperCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCamelCase , **UpperCamelCase ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase , **UpperCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_snake_case : Optional[Any] = a_c
_snake_case : Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCamelCase ) , FadeOut(UpperCamelCase , run_time=0.5 ) , )
_snake_case : Optional[Any] = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase , run_time=3 ) , MoveToTarget(UpperCamelCase ) )
self.wait()
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] ="""Speech2TextFeatureExtractor"""
a_ : Union[str, Any] ="""Speech2TextTokenizer"""
def __init__( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : int ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
_snake_case : Union[str, Any] = self.feature_extractor
_snake_case : Tuple = False
def __call__( self : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase , **UpperCamelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_snake_case : int = kwargs.pop('raw_speech' )
else:
_snake_case : List[str] = kwargs.pop('audio' , UpperCamelCase )
_snake_case : str = kwargs.pop('sampling_rate' , UpperCamelCase )
_snake_case : Tuple = kwargs.pop('text' , UpperCamelCase )
if len(UpperCamelCase ) > 0:
_snake_case : Optional[Any] = args[0]
_snake_case : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_snake_case : Optional[int] = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase )
if text is not None:
_snake_case : Optional[Any] = self.tokenizer(UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_snake_case : str = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self : str , *UpperCamelCase : Tuple , **UpperCamelCase : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@contextmanager
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
_snake_case : Optional[Any] = True
_snake_case : List[str] = self.tokenizer
yield
_snake_case : List[Any] = self.feature_extractor
_snake_case : List[str] = False
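# Typical usage sketch of the (deprecated) target context manager above,
# assuming the conventional method name `as_target_processor` and a loaded
# processor instance (both hypothetical here):
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   with processor.as_target_processor():
#       labels = processor(transcript, return_tensors="pt").input_ids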
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowerCAmelCase_ = """Usage of script: script_name <size_of_canvas:int>"""
lowerCAmelCase_ = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowerCamelCase_ ( lowerCAmelCase: int )-> list[list[bool]]:
_snake_case : Tuple = [[False for i in range(lowerCAmelCase )] for j in range(lowerCAmelCase )]
return canvas
def lowerCamelCase_ ( lowerCAmelCase: list[list[bool]] )-> None:
for i, row in enumerate(lowerCAmelCase ):
for j, _ in enumerate(lowerCAmelCase ):
_snake_case : Dict = bool(random.getrandbits(1 ) )
def lowerCamelCase_ ( lowerCAmelCase: list[list[bool]] )-> list[list[bool]]:
_snake_case : int = np.array(lowerCAmelCase )
_snake_case : Optional[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(lowerCAmelCase ):
for c, pt in enumerate(lowerCAmelCase ):
_snake_case : List[Any] = __judge_point(
lowerCAmelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_snake_case : Tuple = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def lowerCamelCase_ ( lowerCAmelCase: bool , lowerCAmelCase: list[list[bool]] )-> bool:
_snake_case : Optional[Any] = 0
_snake_case : Tuple = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
    # running the rules of the game here.
_snake_case : List[Any] = pt
if pt:
if alive < 2:
_snake_case : int = False
elif alive == 2 or alive == 3:
_snake_case : List[Any] = True
elif alive > 3:
_snake_case : Tuple = False
else:
if alive == 3:
_snake_case : Optional[Any] = True
return state
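# An equivalent vectorised generation step (a sketch, independent of the code
# above; note np.roll wraps around the edges, whereas the slicing above clips
# at the border). `grid` is assumed to be a boolean array.
def step_vectorised(grid: np.ndarray) -> np.ndarray:
    # count the eight neighbours with shifted copies of the grid
    neighbours = sum(
        np.roll(np.roll(grid, dy, axis=0), dx, axis=1)
        for dy in (-1, 0, 1)
        for dx in (-1, 0, 1)
        if (dy, dx) != (0, 0)
    )
    # Conway's rules: a cell is alive next step if it has exactly three live
    # neighbours, or is currently alive with exactly two live neighbours.
    return (neighbours == 3) | (grid & (neighbours == 2))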
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCAmelCase_ = int(sys.argv[1])
# main working structure of this module.
lowerCAmelCase_ = create_canvas(canvas_size)
seed(c)
lowerCAmelCase_ , lowerCAmelCase_ = plt.subplots()
fig.show()
lowerCAmelCase_ = ListedColormap(["""w""", """k"""])
try:
while True:
lowerCAmelCase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
        _snake_case : int = ValueError('n_element should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
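# Sanity check (an aside, assuming the function above is exposed as `hamming`,
# as the __main__ block below already assumes): the first ten Hamming numbers
# are the 5-smooth integers in increasing order.
#   hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]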
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =VQModel
a_ : Any ="""sample"""
@property
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Optional[Any]=(32, 32) ):
'''simple docstring'''
_snake_case : str = 4
_snake_case : Optional[int] = 3
_snake_case : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase )
return {"sample": image}
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return (3, 32, 32)
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Any = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
_snake_case : int = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case : int = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase )
_snake_case : List[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Dict = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_snake_case : Any = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_snake_case : Tuple = image.to(UpperCamelCase )
with torch.no_grad():
_snake_case : Dict = model(UpperCamelCase ).sample
_snake_case : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_snake_case : Union[str, Any] = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
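        # encode the flattened binary mask as a run-length sequence; the assertions below pin the expected runs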
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
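# lazy import structure: backend-specific classes are only imported on first access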
lowerCAmelCase_ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
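    # persist the config in the format implied by the file extension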
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] ="""bert"""
def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int]=3_05_22 , UpperCamelCase : str=7_68 , UpperCamelCase : List[str]=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : str=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Dict=5_12 , UpperCamelCase : str=2 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Optional[int]=1e-1_2 , UpperCamelCase : int=0 , UpperCamelCase : Tuple="absolute" , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Dict = vocab_size
_snake_case : int = hidden_size
_snake_case : Tuple = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : Tuple = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Dict = attention_probs_dropout_prob
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : int = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : int = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : Any = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
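        # these axes are exported as dynamic so the ONNX graph accepts any batch/sequence size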
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 669 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
import sys
def lowerCamelCase_ ( lowerCAmelCase: Any )-> str:
_snake_case : Optional[Any] = len(lowerCAmelCase )
_snake_case : Dict = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
_snake_case : Dict = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
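    # matrix[a][b] holds the minimal multiplication cost for the chain a..b;
    # sol[a][b] records the split index that achieves it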
for chain_length in range(2 , lowerCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
_snake_case : int = a + chain_length - 1
_snake_case : Tuple = sys.maxsize
for c in range(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Dict = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_snake_case : List[str] = cost
_snake_case : int = c
return matrix, sol
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: List[Any] , lowerCAmelCase: Tuple )-> List[str]:
if i == j:
print('A' + str(lowerCAmelCase ) , end=' ' )
else:
print('(' , end=' ' )
        print_optimal_solution(lowerCAmelCase , lowerCAmelCase , optimal_solution[i][j] )
        print_optimal_solution(lowerCAmelCase , optimal_solution[i][j] + 1 , lowerCAmelCase )
print(')' , end=' ' )
def lowerCamelCase_ ( )-> List[str]:
_snake_case : int = [30, 35, 15, 5, 10, 20, 25]
_snake_case : Optional[Any] = len(lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_snake_case , _snake_case : Dict = matrix_chain_order(lowerCAmelCase )
    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(lowerCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: List[Any] , lowerCAmelCase: str )-> int:
# Initialise PyTorch model
_snake_case : Optional[int] = BertConfig.from_json_file(lowerCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
_snake_case : Optional[int] = BertForPreTraining(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
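    # classic simulated annealing: always accept improving neighbors, accept worse ones
    # with probability e^(delta / T), and cool the temperature geometrically each iteration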
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 1 |
import unittest
from knapsack import knapsack as k
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = 0
_snake_case : Any = [0]
_snake_case : Union[str, Any] = [0]
_snake_case : Optional[int] = len(UpperCamelCase )
self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 0 )
_snake_case : Union[str, Any] = [60]
_snake_case : int = [10]
_snake_case : int = len(UpperCamelCase )
self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 0 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = 3
_snake_case : List[Any] = [1, 2, 3]
_snake_case : Dict = [3, 2, 1]
_snake_case : List[Any] = len(UpperCamelCase )
self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 5 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = 50
_snake_case : List[str] = [60, 1_00, 1_20]
_snake_case : Any = [10, 20, 30]
_snake_case : Any = len(UpperCamelCase )
self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 2_20 )
if __name__ == "__main__":
unittest.main()
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
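        # additive causal mask: -10000.0 above the diagonal blocks attention to future positions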
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 669 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[str] , lowerCAmelCase: int=None , lowerCAmelCase: List[str]=None , lowerCAmelCase: str=None , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: Dict=None , )-> Dict:
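    # build default attention masks that attend everywhere except padding positions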
if attention_mask is None:
_snake_case : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_snake_case : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_snake_case : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_snake_case : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int=13 , UpperCamelCase : List[Any]=7 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : int=4 , UpperCamelCase : List[str]=4 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.1 , UpperCamelCase : List[Any]=32 , UpperCamelCase : str=2 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : str=0 , UpperCamelCase : List[str]=0.02 , ):
'''simple docstring'''
_snake_case : List[str] = parent
_snake_case : Optional[int] = batch_size
_snake_case : List[str] = seq_length
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : Any = vocab_size
_snake_case : Dict = hidden_size
_snake_case : Tuple = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Dict = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : List[str] = eos_token_id
_snake_case : int = pad_token_id
_snake_case : Dict = bos_token_id
_snake_case : Tuple = initializer_range
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_snake_case : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_snake_case : Any = shift_tokens_right(UpperCamelCase , 1 , 2 )
_snake_case : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase , )
_snake_case : Optional[int] = prepare_blenderbot_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = 20
_snake_case : Optional[Any] = model_class_name(UpperCamelCase )
_snake_case : List[Any] = model.encode(inputs_dict['input_ids'] )
_snake_case , _snake_case : List[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_snake_case : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase )
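        # decode all but the last token in one pass, then feed the final token alone
        # and check that the cached result matches the uncached forward pass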
_snake_case : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_snake_case : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_snake_case : List[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
_snake_case : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_snake_case : Dict = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , )
_snake_case : str = model.decode(UpperCamelCase , UpperCamelCase )
_snake_case : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def UpperCamelCase_ ( self : Any , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = 20
_snake_case : Optional[Any] = model_class_name(UpperCamelCase )
_snake_case : Optional[Any] = model.encode(inputs_dict['input_ids'] )
_snake_case , _snake_case : Union[str, Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_snake_case : List[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_snake_case : List[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase )
_snake_case : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_snake_case : int = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
_snake_case : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_snake_case : Tuple = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
_snake_case : Optional[Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase )
_snake_case : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] =99
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_snake_case : int = input_ids.shape[0]
_snake_case : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Optional[int] = self._get_config_and_data()
_snake_case : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase )
_snake_case : Optional[Any] = lm_model(input_ids=UpperCamelCase )
_snake_case : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_snake_case : Dict = FlaxBlenderbotForConditionalGeneration(UpperCamelCase )
_snake_case : Tuple = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_snake_case : List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_snake_case : str = lm_model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
_snake_case : Union[str, Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_snake_case : Dict = shift_tokens_right(UpperCamelCase , 1 , 2 )
_snake_case : Union[str, Any] = np.equal(UpperCamelCase , 1 ).astype(np.floataa ).sum()
_snake_case : Dict = np.equal(UpperCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase , UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple =True
a_ : Optional[int] =(
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
a_ : List[Any] =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Tuple = FlaxBlenderbotModelTester(self )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case : Dict = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = model_class(UpperCamelCase )
@jax.jit
def encode_jitted(UpperCamelCase : str , UpperCamelCase : Dict=None , **UpperCamelCase : Union[str, Any] ):
return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )
with self.subTest('JIT Enabled' ):
_snake_case : int = encode_jitted(**UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_snake_case : List[str] = encode_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case : Optional[int] = model_class(UpperCamelCase )
_snake_case : Optional[int] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_snake_case : Optional[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str ):
return model.decode(
decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , )
with self.subTest('JIT Enabled' ):
_snake_case : List[str] = decode_jitted(**UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_snake_case : int = decode_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_snake_case : Union[str, Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_snake_case : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id
_snake_case : Tuple = model(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
_snake_case : List[Any] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
_snake_case : List[str] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=UpperCamelCase )
_snake_case : Optional[int] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
_snake_case : Dict = ['Sam']
_snake_case : Union[str, Any] = tokenizer(UpperCamelCase , return_tensors='jax' )
_snake_case : Any = model.generate(**UpperCamelCase , **UpperCamelCase )
_snake_case : Optional[Any] = 'Sam is a great name. It means "sun" in Gaelic.'
_snake_case : Tuple = tokenizer.batch_decode(UpperCamelCase , **UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
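    # iterative Catalan recurrence: C(i) = C(i - 1) * (4 * i - 2) // (i + 1)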
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCamelCase_ ( lowerCAmelCase: Dict )-> List[str]:
_snake_case : str = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Optional[Any]:
_snake_case , _snake_case : Any = emb.weight.shape
_snake_case : Union[str, Any] = nn.Linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase )
_snake_case : Tuple = emb.weight.data
return lin_layer
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> Any:
_snake_case : Optional[Any] = torch.load(lowerCAmelCase , map_location='cpu' )
_snake_case : Optional[int] = Namespace(**checkpoint['cfg']['model'] )
_snake_case : Optional[Any] = checkpoint['model']
remove_ignore_keys_(lowerCAmelCase )
_snake_case : str = state_dict['decoder.embed_tokens.weight'].shape[0]
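    # fairseq stores weights under a 'decoder.' prefix; HF XGLM expects 'model.'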
_snake_case : Union[str, Any] = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
_snake_case : List[str] = XGLMConfig(
vocab_size=lowerCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_snake_case : List[Any] = XGLMForCausalLM(lowerCAmelCase )
_snake_case : Optional[int] = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
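# Usage sketch (illustrative; the fast tokenizer defined above corresponds to
# LxmertTokenizerFast upstream):
# tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# tokenizer("who is in the picture?")  # wrapped as [CLS] ... [SEP] by the method above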
| 669 | 1 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if dst_width <= 0 or dst_height <= 0:
raise ValueError('Destination width/height should be > 0' )
_snake_case : List[str] = img
_snake_case : str = img.shape[1]
_snake_case : List[str] = img.shape[0]
_snake_case : Optional[int] = dst_width
_snake_case : int = dst_height
_snake_case : Optional[int] = self.src_w / self.dst_w
_snake_case : Dict = self.src_h / self.dst_h
_snake_case : Any = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for i in range(self.dst_h ):
for j in range(self.dst_w ):
_snake_case : List[Any] = self.img[self.get_y(UpperCamelCase )][self.get_x(UpperCamelCase )]
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int ):
'''simple docstring'''
return int(self.ratio_x * x )
def UpperCamelCase_ ( self : Any , UpperCamelCase : int ):
'''simple docstring'''
return int(self.ratio_y * y )
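# Each destination pixel (i, j) copies the source pixel at
# (int(i * src_h / dst_h), int(j * src_w / dst_w)): nearest-neighbour scaling
# with no interpolation, so upscaling produces visible blocks.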
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = 800, 600
lowerCAmelCase_ = imread("""image_data/lena.jpg""", 1)
lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
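# Worked example (illustrative): build a treap, erase a value, print inorder.
# root = None
# for v in (5, 3, 8):
#     root = insert(root, v)
# root = erase(root, 3)
# inorder(root)  # prints the remaining values in sorted order: 5,8,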
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[str]=10_24 , UpperCamelCase : int=10_24 , UpperCamelCase : Any=3.6 ):
'''simple docstring'''
_snake_case : Optional[int] = tokenizer
_snake_case : List[Any] = tokenizer.bos_token_id
_snake_case : Optional[int] = dataset
_snake_case : Optional[Any] = seq_length
_snake_case : Optional[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Optional[int] ):
'''simple docstring'''
_snake_case : int = iter(self.dataset )
_snake_case : int = True
while more_examples:
_snake_case , _snake_case : Dict = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
_snake_case : Union[str, Any] = False
break
_snake_case : Optional[int] = tokenizer(UpperCamelCase , truncation=UpperCamelCase )['input_ids']
_snake_case : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase ) , self.seq_length ):
_snake_case : Union[str, Any] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase ) == self.seq_length:
yield torch.tensor(UpperCamelCase )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> Tuple:
_snake_case : int = {'streaming': True}
_snake_case : Tuple = load_dataset(args.dataset_name , split='train' , **lowerCAmelCase )
_snake_case : List[Any] = ConstantLengthDataset(lowerCAmelCase , lowerCAmelCase , seq_length=args.seq_length )
_snake_case : int = DataLoader(lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def lowerCamelCase_ ( lowerCAmelCase: List[Any] )-> Dict:
model.eval()
_snake_case : List[Any] = []
for step, batch in enumerate(lowerCAmelCase ):
with torch.no_grad():
_snake_case : List[Any] = model(lowerCAmelCase , labels=lowerCAmelCase )
_snake_case : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_snake_case : Union[str, Any] = torch.mean(torch.cat(lowerCAmelCase ) )
try:
_snake_case : Any = torch.exp(lowerCAmelCase )
except OverflowError:
_snake_case : Tuple = float('inf' )
return loss.item(), perplexity.item()
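# Perplexity is exp(mean token-level cross-entropy); the OverflowError guard
# above maps a diverged loss to infinity instead of crashing the evaluation.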
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 669 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
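# Equivalent loop-based sketch (illustrative), using the 1000-digit constant
# bound to N in the signature above:
# from math import prod
# max(prod(int(c) for c in N[i : i + 13]) for i in range(len(N) - 12))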
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] =(UniPCMultistepScheduler,)
a_ : Tuple =(("""num_inference_steps""", 25),)
def UpperCamelCase_ ( self : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**UpperCamelCase )
return config
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : str=0 , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Tuple = dict(self.forward_default_kwargs )
_snake_case : Optional[int] = kwargs.pop('num_inference_steps' , UpperCamelCase )
_snake_case : str = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**UpperCamelCase )
_snake_case : List[Any] = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
_snake_case : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
_snake_case : Optional[Any] = scheduler_class.from_pretrained(UpperCamelCase )
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
_snake_case : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case : Optional[int] = sample, sample
for t in range(UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_snake_case : int = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
_snake_case : List[Any] = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : Any , UpperCamelCase : int=0 , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop('num_inference_steps' , UpperCamelCase )
_snake_case : int = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : List[Any] = self.get_scheduler_config()
_snake_case : List[str] = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
_snake_case : List[str] = scheduler_class.from_pretrained(UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Dict = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
_snake_case : Tuple = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if scheduler is None:
_snake_case : List[Any] = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config(**UpperCamelCase )
_snake_case : Optional[int] = scheduler_class(**UpperCamelCase )
_snake_case : int = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**UpperCamelCase )
_snake_case : Any = scheduler_class(**UpperCamelCase )
_snake_case : List[Any] = 10
_snake_case : Optional[int] = self.dummy_model()
_snake_case : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
return sample
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = dict(self.forward_default_kwargs )
_snake_case : Optional[int] = kwargs.pop('num_inference_steps' , UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_snake_case : str = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**UpperCamelCase )
_snake_case : Union[str, Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase , 'set_timesteps' ):
scheduler.set_timesteps(UpperCamelCase )
elif num_inference_steps is not None and not hasattr(UpperCamelCase , 'set_timesteps' ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : Union[str, Any] = scheduler.timesteps[5]
_snake_case : str = scheduler.timesteps[6]
_snake_case : Any = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
_snake_case : Any = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = UniPCMultistepScheduler(**self.get_scheduler_config() )
_snake_case : Optional[int] = self.full_loop(scheduler=UpperCamelCase )
_snake_case : Tuple = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
_snake_case : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case : int = self.full_loop(scheduler=UpperCamelCase )
_snake_case : List[Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase , prediction_type=UpperCamelCase , sample_max_value=UpperCamelCase , solver_order=UpperCamelCase , solver_type=UpperCamelCase , )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase , solver_type=UpperCamelCase , prediction_type=UpperCamelCase , )
_snake_case : Optional[int] = self.full_loop(
solver_order=UpperCamelCase , solver_type=UpperCamelCase , prediction_type=UpperCamelCase , )
assert not torch.isnan(UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=UpperCamelCase )
self.check_over_configs(lower_order_final=UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=UpperCamelCase , time_step=0 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = self.full_loop()
_snake_case : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.full_loop(prediction_type='v_prediction' )
_snake_case : Dict = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.10_14 ) < 1e-3
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Tuple = self.scheduler_classes[0]
_snake_case : int = self.get_scheduler_config(thresholding=UpperCamelCase , dynamic_thresholding_ratio=0 )
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Dict = 10
_snake_case : int = self.dummy_model()
_snake_case : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Dict = model(UpperCamelCase , UpperCamelCase )
_snake_case : int = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCamelCase_ ( self : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_snake_case : str = self.get_scheduler_config(**UpperCamelCase )
_snake_case : Dict = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
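# Minimal denoising-loop sketch with UniPCMultistepScheduler (shapes illustrative):
# scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.timesteps:
#     residual = torch.zeros_like(sample)  # stand-in for a real model call
#     sample = scheduler.step(residual, t, sample).prev_sample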
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
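# Note: make_duplicate_clusters groups files whose MinHash Jaccard similarity
# exceeds the 0.85 threshold; in the fixture above, 'a ' * 20 and 'a ' * 30
# share their shingles, which is why the first cluster has two members.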
| 669 | 1 |
import pprint
import requests
lowerCAmelCase_ = """https://zenquotes.io/api"""
def lowerCamelCase_ ( )-> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def lowerCamelCase_ ( )-> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowerCAmelCase_ = random_quotes()
pprint.pprint(response)
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
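# Usage sketch (illustrative): the processor routes text to the XLM-R tokenizer
# and images to the CLIP image processor, merging pixel_values into the text
# encoding when both are supplied, e.g.:
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")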
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
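# Round-trip example (illustrative) for the encoder/decoder defined above:
# encode: b"Hello"      -> '48656C6C6F'
# decode: '48656C6C6F'  -> b'Hello'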
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
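# The metadata built above maps each class id to its name and additionally
# records 'thing_ids' (classes with isthing=True) and 'class_names', mirroring
# the structure of the panoptic class-info json fetched from the Hub.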
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = feature_extractor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_snake_case : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
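# binary_mask_to_rle encodes the flattened mask as alternating run lengths of
# 0s and 1s; in the test above the first zero-run has length 21 and the first
# one-run has length 45, hence the rle[0] / rle[1] assertions.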
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 | 1 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowerCAmelCase_ = False
try:
lowerCAmelCase_ = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase : str = None , UpperCamelCase : list = [] ):
'''simple docstring'''
_snake_case : Tuple = 0
_snake_case : Optional[Any] = choices
_snake_case : Optional[int] = prompt
if sys.platform == "win32":
_snake_case : Any = '*'
else:
_snake_case : Dict = '➔ '
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : str = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase )
else:
forceWrite(self.choices[index] , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(UpperCamelCase )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Direction , UpperCamelCase : int = 1 ):
'''simple docstring'''
_snake_case : int = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase )
move_cursor(UpperCamelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(UpperCamelCase )] for number in range(10 )] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = int(chr(self.current_selection ) )
_snake_case : int = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCamelCase )
else:
return
else:
return
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and select with enter' , '\n' )
_snake_case : Optional[Any] = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
_snake_case : Tuple = int(builtins.input() )
except ValueError:
_snake_case : str = default_choice
else:
_snake_case : Optional[int] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(UpperCamelCase , '\n' )
return choice
| 669 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
# Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
# Number of empty cell before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
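# The update step above applies the three Nagel-Schreckenberg rules per car:
# accelerate by 1 (capped at max_speed), brake down to the gap to the next car,
# then randomly slow down by 1 with the given probability.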
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
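    # A minimal usage sketch (illustrative values only): build a 30-cell highway
    # with a car every 5 cells starting at speed 2, run five update steps and
    # print the successive states ("." marks an empty cell).
    for state in simulate(construct_highway(30, 5, 2), 5, 0.1, 5):
        print("".join("." if cell == -1 else str(cell) for cell in state))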
| 669 | 1 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
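    # A minimal usage sketch (illustrative values only): an undirected 4-vertex
    # graph where each possible edge is kept with probability 0.5; seeding the
    # RNG makes the output reproducible.
    random.seed(1)
    print(random_graph(4, 0.5))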
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        '''simple docstring'''
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
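# A minimal usage sketch for the candidate batching above (illustrative only;
# loading the pretrained files requires network access):
#
#   from transformers import RealmTokenizerFast
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch_text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   encoded = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")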
| 669 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Any =OpenAIGPTTokenizer
a_ : List[Any] =OpenAIGPTTokenizerFast
a_ : str =True
a_ : int =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Tuple ):
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : str=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# Simple input
_snake_case : Optional[int] = 'This is a simple input'
_snake_case : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : str = ('This is a simple input', 'This is a pair')
_snake_case : Tuple = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
pass
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
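# Example invocation (illustrative; any key of SLOW_TO_FAST_CONVERTERS works as --tokenizer_name):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast-tokenizers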
| 669 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
'''simple docstring'''
    def __init__(self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
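# Example invocation (illustrative paths):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning.ckpt \
#       --pytorch_dump_folder_path ./converted-model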
| 669 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
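    # A quick round-trip sketch: encode a byte string and decode it back.
    encoded = base16_encode(b"Hello World!")
    print(encoded)  # 48656C6C6F20576F726C6421
    assert base16_decode(encoded) == b"Hello World!"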
| 669 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( UpperCamelCase : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
raise NotImplementedError()
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=2_00)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"""getting tweets before {oldest}""")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=2_00, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets)} tweets downloaded so far""")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
print(F"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""YolosFeatureExtractor"""]
lowerCAmelCase_ = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """YolosForObjectDetection""",
        """YolosModel""",
        """YolosPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
    def __init__(self, tokenizer):
        '''simple docstring'''
        super().__init__()
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
        self.model = TFGPTaLMHeadModel.from_config(config)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
    def serving(self, text):
        '''simple docstring'''
        tokenized = self.tokenizer(text)
        input_ids_dense = tokenized["input_ids"].to_tensor()
        input_mask = tf.cast(input_ids_dense > 0, tf.int64)
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
        return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
assert out_length == max_length
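# A minimal usage sketch (illustrative): the in-graph tokenizer accepts string
# tensors directly, so it can be embedded in a TF serving signature, e.g.
#
#   tf_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2")
#   outputs = tf_tokenizer(tf.constant(["Hello world!"]))  # dict of int tensors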
| 669 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 669 |
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
    data = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
    dataset = Dataset.from_dict(data)
    return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        binary_mask = np.zeros((20, 50))
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
        rle = binary_mask_to_rle(binary_mask)
        self.assertEqual(len(rle), 4)
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ))
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 669 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
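# Example invocations (illustrative):
#   accelerate config                                # interactive prompts, default save location
#   accelerate config --config_file ./my_config.yaml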
| 669 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 669 |
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n: int) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n: int) -> None:
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[int] =field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ : str =field(
default=UpperCAmelCase_ , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ : Optional[bool] =field(
default=UpperCAmelCase_ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a_ : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowerCamelCase_ ( )-> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_snake_case , _snake_case , _snake_case : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
datasets.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_snake_case : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_snake_case : List[Any] = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_snake_case : Optional[Any] = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : str = train_dataset.features['label'].names
if training_args.do_eval:
_snake_case : List[str] = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Tuple = eval_dataset.features['label'].names
if training_args.do_predict:
_snake_case : List[str] = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Optional[int] = predict_dataset.features['label'].names
# Labels
_snake_case : Any = len(lowerCAmelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , idalabel={str(lowerCAmelCase ): label for i, label in enumerate(lowerCAmelCase )} , labelaid={label: i for i, label in enumerate(lowerCAmelCase )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : str = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_snake_case : Dict = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_snake_case : Optional[int] = False
def preprocess_function(lowerCAmelCase: Optional[Any] ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=lowerCAmelCase , max_length=data_args.max_seq_length , truncation=lowerCAmelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_snake_case : Optional[int] = min(len(lowerCAmelCase ) , data_args.max_train_samples )
_snake_case : List[Any] = train_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_snake_case : str = train_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCAmelCase ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_snake_case : Optional[int] = min(len(lowerCAmelCase ) , data_args.max_eval_samples )
_snake_case : Union[str, Any] = eval_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_snake_case : Optional[int] = eval_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_snake_case : Union[str, Any] = min(len(lowerCAmelCase ) , data_args.max_predict_samples )
_snake_case : Union[str, Any] = predict_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
_snake_case : int = predict_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
_snake_case : str = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase: EvalPrediction ):
_snake_case : Dict = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase ) else p.predictions
_snake_case : Any = np.argmax(lowerCAmelCase , axis=1 )
return metric.compute(predictions=lowerCAmelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_snake_case : Tuple = default_data_collator
elif training_args.fpaa:
_snake_case : List[str] = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 )
else:
_snake_case : str = None
# Initialize our Trainer
_snake_case : List[Any] = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
_snake_case : Any = None
if training_args.resume_from_checkpoint is not None:
_snake_case : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : Tuple = last_checkpoint
_snake_case : str = trainer.train(resume_from_checkpoint=lowerCAmelCase )
_snake_case : Any = train_result.metrics
_snake_case : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
_snake_case : List[str] = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCAmelCase )
trainer.save_metrics('train' , lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case : Dict = trainer.evaluate(eval_dataset=lowerCAmelCase )
_snake_case : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase )
_snake_case : Tuple = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics('eval' , lowerCAmelCase )
trainer.save_metrics('eval' , lowerCAmelCase )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
_snake_case , _snake_case , _snake_case : Optional[int] = trainer.predict(lowerCAmelCase , metric_key_prefix='predict' )
_snake_case : Tuple = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCAmelCase )
)
_snake_case : List[str] = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics('predict' , lowerCAmelCase )
trainer.save_metrics('predict' , lowerCAmelCase )
_snake_case : List[Any] = np.argmax(lowerCAmelCase , axis=1 )
_snake_case : int = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCAmelCase ):
_snake_case : Union[str, Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
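# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Self-contained demo of the metric step above: argmax the raw logits over the
# class axis and score them against gold labels. Plain numpy accuracy stands
# in for evaluate.load("xnli") so nothing has to be downloaded; numpy is
# already imported at the top of this file.
if __name__ == "__main__":
    _demo_logits = np.array([[0.1, 0.8, 0.1], [0.6, 0.3, 0.1]])  # (batch, num_labels)
    _demo_labels = np.array([1, 0])
    _demo_preds = np.argmax(_demo_logits, axis=1)
    print({"accuracy": float((_demo_preds == _demo_labels).mean())})  # {'accuracy': 1.0}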
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
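# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Minimal use of the released config class this file corresponds to; assumes
# a transformers version that ships ASTConfig (roughly >= 4.25).
if __name__ == "__main__":
    from transformers import ASTConfig

    _demo_cfg = ASTConfig(num_mel_bins=128, max_length=1024)
    print(_demo_cfg.hidden_size, _demo_cfg.patch_size)  # 768 16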
| 669 | 1 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before the calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
# Number of empty cell before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
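# --- Illustrative sketch (added for clarity; not part of the original file) ---
# A tiny self-contained Nagel-Schreckenberg step mirroring the rules above:
# accelerate by 1 up to max_speed, brake to the gap ahead, apply a random
# slowdown, then move on a circular road. All names here are hypothetical.
from random import random as _demo_rand

def _demo_step(road: list, max_speed: int = 5, p_slow: float = 0.3) -> list:
    n = len(road)
    nxt = [-1] * n
    for i, v in enumerate(road):
        if v == -1:  # empty cell
            continue
        v = min(v + 1, max_speed)  # 1) accelerate
        gap = next(d for d in range(1, n + 1) if d == n or road[(i + d) % n] != -1) - 1
        v = min(v, gap)  # 2) brake to avoid the car ahead
        if _demo_rand() < p_slow:
            v = max(v - 1, 0)  # 3) random slowdown
        nxt[(i + v) % n] = v  # 4) move (wraps around)
    return nxt

if __name__ == "__main__":
    print(_demo_step([0, -1, -1, 2, -1, -1, -1, -1]))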
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
    )

    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_max.score()}"""
    )
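# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The same annealing idea on a bare 1-D parabola, without SearchProblem:
# accept any improvement, accept a worse move with probability e^(delta/T),
# and cool the temperature geometrically. All names here are hypothetical.
import math as _demo_math
import random as _demo_random

def _demo_anneal(start: float = 10.0, temp: float = 100.0, rate: float = 0.01) -> float:
    x = start
    while temp > 1e-3:
        cand = x + _demo_random.uniform(-1, 1)
        delta = (x * x) - (cand * cand)  # > 0 when cand lowers f(x) = x^2
        if delta > 0 or _demo_random.random() < _demo_math.exp(delta / temp):
            x = cand
        temp *= 1 - rate  # geometric cooling
    return x

if __name__ == "__main__":
    print(f"minimum of f(x) = x^2 found near x = {_demo_anneal():.3f}")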
| 669 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[str] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_snake_case : Union[str, Any] = torch.manual_seed(0 )
_snake_case : List[Any] = pipe.dual_guided(
prompt='first prompt' , image=UpperCamelCase , text_to_image_strength=0.75 , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase )
_snake_case : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : int = generator.manual_seed(0 )
_snake_case : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=UpperCamelCase , text_to_image_strength=0.75 , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : List[str] = 'cyberpunk 2077'
_snake_case : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_snake_case : Union[str, Any] = torch.manual_seed(0 )
_snake_case : Optional[Any] = pipe.dual_guided(
prompt=UpperCamelCase , image=UpperCamelCase , text_to_image_strength=0.75 , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
_snake_case : Dict = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case : str = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_snake_case : Tuple = 'A painting of a squirrel eating a burger '
_snake_case : Dict = torch.manual_seed(0 )
_snake_case : Tuple = pipe.text_to_image(
prompt=UpperCamelCase , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_snake_case : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case : List[Any] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_snake_case : Tuple = pipe.image_variation(UpperCamelCase , generator=UpperCamelCase , output_type='numpy' ).images
_snake_case : Dict = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case : Tuple = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
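# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Driving the released diffusers PriorTransformer (which this file mirrors)
# with a deliberately tiny config; the sizes below are arbitrary test values,
# and the shapes follow the forward() contract implemented above. Assumes
# diffusers is installed; torch is already imported at the top of this file.
if __name__ == "__main__":
    from diffusers.models import PriorTransformer

    _demo_model = PriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=2,
        embedding_dim=8, num_embeddings=7, additional_embeddings=4,
    )
    _demo_out = _demo_model(
        hidden_states=torch.randn(1, 8),
        timestep=1,
        proj_embedding=torch.randn(1, 8),
        encoder_hidden_states=torch.randn(1, 7, 8),
    )
    print(_demo_out.predicted_image_embedding.shape)  # torch.Size([1, 8])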
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
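# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The loop above applies the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1)
# with C(1) = 1, i.e. it returns the (n-1)-th Catalan number. A standalone
# copy, checked against the known sequence; names are hypothetical.
def _demo_catalan(n: int) -> int:
    c = 1
    for i in range(1, n):
        c = c * (4 * i - 2) // (i + 1)
    return c

assert [_demo_catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]
print(_demo_catalan(10))  # 4862 (the 9th Catalan number)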
| 669 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase_ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase_ = """main"""
# Default branch name
lowerCAmelCase_ = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
lowerCAmelCase_ = """aaaaaaa"""
# This commit does not exist, so we should 404.
lowerCAmelCase_ = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase_ = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def lowerCamelCase_ ( )-> int:
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def lowerCamelCase_ ( )-> Optional[int]:
print('Bonjour!' )
yield
print('Au revoir!' )
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[Any] ):
'''simple docstring'''
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(find_labels(UpperCamelCase ) , ['labels'] )
self.assertEqual(find_labels(UpperCamelCase ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(UpperCamelCase ) , ['start_positions', 'end_positions'] )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(UpperCamelCase ) , ['labels'] )
@require_tf
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(UpperCamelCase ) , ['labels'] )
self.assertEqual(find_labels(UpperCamelCase ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(UpperCamelCase ) , ['start_positions', 'end_positions'] )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(UpperCamelCase ) , ['labels'] )
@require_flax
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.assertEqual(find_labels(UpperCamelCase ) , [] )
self.assertEqual(find_labels(UpperCamelCase ) , [] )
self.assertEqual(find_labels(UpperCamelCase ) , [] )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(UpperCamelCase ) , [] )
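# --- Illustrative sketch (added for clarity; not part of the original file) ---
# ContextManagers, exercised above, behaves like contextlib.ExitStack: it
# enters a list of context managers in order and unwinds them in reverse.
# A stdlib-only sketch of the same idea; names are hypothetical.
if __name__ == "__main__":
    from contextlib import ExitStack, contextmanager

    @contextmanager
    def _demo_ctx(name):
        print(f"enter {name}")
        yield
        print(f"exit {name}")

    with ExitStack() as _demo_stack:
        for _demo_cm in (_demo_ctx("fr"), _demo_ctx("en")):
            _demo_stack.enter_context(_demo_cm)
        print("Transformers are awesome!")
    # enter fr / enter en / Transformers are awesome! / exit en / exit fr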
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
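# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Typical use of the fast tokenizer defined above, via its released class
# name; from_pretrained() downloads the vocab from the Hub, so this needs
# network access.
if __name__ == "__main__":
    from transformers import LxmertTokenizerFast

    _demo_tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    _demo_enc = _demo_tok("a picture of a cat", "what animal is shown?")
    print(_demo_enc["input_ids"][:5], _demo_enc["token_type_ids"][-5:])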
| 669 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowerCAmelCase_ = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
lowerCAmelCase_ = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
lowerCAmelCase_ = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def lowerCamelCase_ ( lowerCAmelCase: int )-> Dict:
def remove_articles(lowerCAmelCase: List[str] ):
_snake_case : str = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
return re.sub(lowerCAmelCase , ' ' , lowerCAmelCase )
def white_space_fix(lowerCAmelCase: str ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase: str ):
_snake_case : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase: Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase ) ) ) )
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: int )-> Union[str, Any]:
return int(normalize_answer(lowerCAmelCase ) == normalize_answer(lowerCAmelCase ) )
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: List[Any] )-> Tuple:
_snake_case : List[Any] = [any(compute_exact(lowerCAmelCase , lowerCAmelCase ) for ref in refs ) for pred, refs in zip(lowerCAmelCase , lowerCAmelCase )]
return (sum(lowerCAmelCase ) / len(lowerCAmelCase )) * 1_00
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
_snake_case : Dict = [rgram for rgrams in rgramslist for rgram in rgrams]
_snake_case : str = Counter(lowerCAmelCase )
_snake_case : Union[str, Any] = Counter(lowerCAmelCase )
_snake_case : Dict = Counter()
for sgram, scount in sgramcounter.items():
_snake_case : Dict = scount * numref
_snake_case : Union[str, Any] = Counter(lowerCAmelCase )
_snake_case : Union[str, Any] = Counter()
for cgram, ccount in cgramcounter.items():
_snake_case : Any = ccount * numref
# KEEP
_snake_case : int = sgramcounter_rep & cgramcounter_rep
_snake_case : Dict = keepgramcounter_rep & rgramcounter
_snake_case : Any = sgramcounter_rep & rgramcounter
_snake_case : str = 0
_snake_case : Tuple = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_snake_case : int = 1
_snake_case : Tuple = 1
if len(lowerCAmelCase ) > 0:
_snake_case : Tuple = keeptmpscorea / len(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_snake_case : List[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_snake_case : Optional[Any] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_snake_case : Dict = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_snake_case : List[str] = sgramcounter_rep - cgramcounter_rep
_snake_case : Tuple = delgramcounter_rep - rgramcounter
_snake_case : Any = sgramcounter_rep - rgramcounter
_snake_case : int = 0
_snake_case : List[str] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_snake_case : List[str] = 1
if len(lowerCAmelCase ) > 0:
_snake_case : Tuple = deltmpscorea / len(lowerCAmelCase )
# ADDITION
_snake_case : Any = set(lowerCAmelCase ) - set(lowerCAmelCase )
_snake_case : str = set(lowerCAmelCase ) & set(lowerCAmelCase )
_snake_case : Optional[int] = set(lowerCAmelCase ) - set(lowerCAmelCase )
_snake_case : Dict = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_snake_case : List[str] = 1
_snake_case : Optional[Any] = 1
if len(lowerCAmelCase ) > 0:
_snake_case : Any = addtmpscore / len(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
_snake_case : Optional[Any] = addtmpscore / len(lowerCAmelCase )
_snake_case : List[Any] = 0
if addscore_precision > 0 or addscore_recall > 0:
_snake_case : Dict = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: Any , lowerCAmelCase: Tuple )-> int:
_snake_case : Dict = len(lowerCAmelCase )
_snake_case : Optional[Any] = ssent.split(' ' )
_snake_case : str = csent.split(' ' )
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = []
_snake_case : List[str] = []
_snake_case : Any = []
_snake_case : Union[str, Any] = []
_snake_case : List[str] = []
_snake_case : Optional[int] = []
_snake_case : Any = []
_snake_case : Union[str, Any] = []
_snake_case : Tuple = []
for rsent in rsents:
_snake_case : List[str] = rsent.split(' ' )
_snake_case : Dict = []
_snake_case : Optional[Any] = []
_snake_case : Optional[int] = []
ragramslist.append(lowerCAmelCase )
for i in range(0 , len(lowerCAmelCase ) - 1 ):
if i < len(lowerCAmelCase ) - 1:
_snake_case : str = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(lowerCAmelCase )
if i < len(lowerCAmelCase ) - 2:
_snake_case : List[str] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(lowerCAmelCase )
if i < len(lowerCAmelCase ) - 3:
_snake_case : str = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(lowerCAmelCase )
ragramslist.append(lowerCAmelCase )
ragramslist.append(lowerCAmelCase )
ragramslist.append(lowerCAmelCase )
for i in range(0 , len(lowerCAmelCase ) - 1 ):
if i < len(lowerCAmelCase ) - 1:
_snake_case : Union[str, Any] = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(lowerCAmelCase )
if i < len(lowerCAmelCase ) - 2:
_snake_case : int = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(lowerCAmelCase )
if i < len(lowerCAmelCase ) - 3:
_snake_case : List[Any] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(lowerCAmelCase )
for i in range(0 , len(lowerCAmelCase ) - 1 ):
if i < len(lowerCAmelCase ) - 1:
_snake_case : Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(lowerCAmelCase )
if i < len(lowerCAmelCase ) - 2:
_snake_case : Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(lowerCAmelCase )
if i < len(lowerCAmelCase ) - 3:
_snake_case : int = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(lowerCAmelCase )
((_snake_case) , (_snake_case) , (_snake_case)) : Tuple = SARIngram(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((_snake_case) , (_snake_case) , (_snake_case)) : str = SARIngram(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((_snake_case) , (_snake_case) , (_snake_case)) : str = SARIngram(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((_snake_case) , (_snake_case) , (_snake_case)) : Optional[int] = SARIngram(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_snake_case : Union[str, Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_snake_case : Any = sum([delascore, delascore, delascore, delascore] ) / 4
_snake_case : Any = sum([addascore, addascore, addascore, addascore] ) / 4
_snake_case : Optional[int] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: bool = True , lowerCAmelCase: str = "13a" , lowerCAmelCase: bool = True )-> Union[str, Any]:
    # Normalization is required for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_snake_case : str = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_snake_case : Optional[int] = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase )()(lowerCAmelCase )
else:
_snake_case : Any = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase )
elif tokenizer == "moses":
_snake_case : Dict = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase , return_str=lowerCAmelCase , escape=lowerCAmelCase )
elif tokenizer == "penn":
_snake_case : int = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase , return_str=lowerCAmelCase )
else:
_snake_case : Optional[int] = sentence
if not return_str:
_snake_case : Tuple = normalized_sent.split()
return normalized_sent
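# A hedged sketch of the normalizer: with the default '13a' sacrebleu
# tokenizer the text is lowercased and trailing punctuation is split off
# (exact token boundaries depend on the installed sacrebleu version).
def _normalize_example() -> str:
    # roughly 'about 95 species are currently accepted .'
    return normalize('About 95 species are currently accepted.')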
def compute_sari(sources, predictions, references) -> float:
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('Sources length must match predictions and references lengths.')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
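# A hedged corpus-level sketch: `sources`, `predictions` and `references` are
# parallel lists, one entry per example, with a list of references per example.
def _compute_sari_example() -> float:
    sources = ['About 95 species are currently accepted .']
    predictions = ['About 95 you now get in .']
    references = [['About 95 species are currently known .']]
    return compute_sari(sources, predictions, references)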
def compute_sacrebleu(
    predictions,
    references,
    smooth_method='exp',
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('Sacrebleu requires the same number of references for each prediction')
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
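# A hedged sketch: every prediction needs the same number of references; the
# helper above transposes them into sacrebleu's one-list-per-reference-slot
# layout before calling `corpus_bleu`.
def _compute_sacrebleu_example() -> float:
    predictions = ['hello there general kenobi']
    references = [['hello there general kenobi', 'hello there !']]
    return compute_sacrebleu(predictions=predictions, references=references)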
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=[
                'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
                'https://github.com/cocoxu/simplification/blob/master/SARI.py',
                'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
                'https://github.com/mjpost/sacreBLEU',
            ],
            reference_urls=[
                'https://www.aclweb.org/anthology/Q16-1029.pdf',
                'https://github.com/mjpost/sacreBLEU',
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
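# A hedged usage sketch (assuming this file is packaged as the `wiki_split`
# metric for `datasets`; the name and loading path are assumptions):
#
#     import datasets
#     metric = datasets.load_metric('wiki_split')
#     results = metric.compute(
#         sources=['About 95 species are currently accepted .'],
#         predictions=['About 95 you now get in .'],
#         references=[['About 95 species are currently known .']],
#     )
#     # -> a dict with 'sari', 'sacrebleu' and 'exact' keys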
| 669 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: a BST by `value` combined with a min-heap by random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right
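# A hedged illustration: `str(node)` concatenates values in pre-order
# (root, then left subtree, then right subtree).
def _node_str_example() -> str:
    root = Node(1)
    root.left = Node(0)
    root.right = Node(2)
    return str(root)  # '1 0 2 '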
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two treaps: keys <= value and keys > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
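# A hedged sketch: `split` sends keys <= value to the left treap and the
# rest to the right treap (`insert` is defined further below in this module).
def _split_example() -> tuple[Node | None, Node | None]:
    root = None
    for x in (1, 2, 3, 4):
        root = insert(root, x)
    return split(root, 2)  # (treap holding 1 and 2, treap holding 3 and 4)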
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, assuming every key in `left` is <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
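# A hedged sketch: `merge` relies on the precondition above and picks the
# root by the smaller random priority, so the result stays a valid treap.
def _merge_example() -> Node | None:
    return merge(Node(1), Node(3))  # inorder traversal would print 1,3,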
def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting around it and merging the new node back in."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node holding `value` by splitting it out and re-merging."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Print the keys in sorted order, comma separated."""
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=',')
    inorder(root.right)
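# A hedged end-to-end sketch: insert values, erase one, and print the
# remaining keys in sorted order.
def _treap_example() -> None:
    root = None
    for x in (5, 1, 3, 3):
        root = insert(root, x)
    root = erase(root, 3)  # removes both nodes holding 3
    inorder(root)  # prints 1,5,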
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: '+n' inserts n, '-n' erases n."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root
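# A hedged sketch of the command mini-language handled above.
def _interact_example() -> None:
    root = interact_treap(None, '+1 +3 +5 -3')
    inorder(root)  # prints 1,5,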
def main() -> None:
    """Drive the treap interactively from stdin."""
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('goodbye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 1 |