Dataset schema (reconstructed from the garbled extraction of the column table):

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81 – 54k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Build and simulate a trivial measurement circuit.

    Creates a circuit with *qubits* quantum and *classical_bits* classical
    bits, measures qubit 0 into classical bit 0, runs it for 1000 shots on
    the Aer simulator, and returns the resulting counts histogram.

    The original (obfuscated) body assigned its locals to `_snake_case` but
    then referenced `circuit` and `job`, which were never defined — fixed.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


# Backward-compatible alias for the previous (obfuscated) function name.
A__ = single_qubit_measure

if __name__ == "__main__":
    print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 652 |
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask

# Module-level logger used by the task classes below. The previous
# `lowercase_ : Tuple = ...` annotation referenced `Tuple`, which is not
# imported here — module-level annotations are evaluated, so importing the
# module raised a NameError. The classes also reference `logger`, so the
# canonical name is restored; `lowercase_` is kept as a compatibility alias.
logger = logging.getLogger(__name__)
lowercase_ = logger
class lowercase ( a_ ):
    """CoNLL-2003-style NER token-classification task.

    Input files contain one whitespace-separated ``<token> ... <label>`` line
    per token; sentences are separated by blank lines or ``-DOCSTART-``
    markers. The obfuscated original assigned every local to ``_snake_case``
    and gave all three methods the same name, leaving the class unusable;
    the original method names (required by the ``TokenClassificationTask``
    base class) and locals are restored.
    """

    def __init__(self, label_idx: int = -1):
        # Column (of a whitespace-split line) carrying the label; -1 means the
        # last column, as in CoNLL-2003 NER files.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{mode}.txt`` under *data_dir* into ``InputExample`` objects."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated tokens.
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('\n', ''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O')
            # Flush a trailing sentence with no final blank line.
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Interleave predicted labels with the tokens of the test file."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
                writer.write(output_line)
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.', line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Load the label set from *path*, falling back to CoNLL-2003 defaults."""
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
    """CoNLL-2000-style chunking task.

    In CoNLL-2000 chunk files the chunk tag is the second-to-last column,
    hence ``label_idx=-2``. The obfuscated original assigned the loaded
    labels to ``_snake_case`` and then read an undefined ``labels`` — fixed.
    """

    def __init__(self):
        # Chunk tag lives in the second-to-last column of each line.
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Load the label set from *path*, falling back to CoNLL-2000 chunk tags."""
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowercase ( a_ ):
    """Universal Dependencies POS-tagging task backed by CoNLL-U files.

    Restores the original method names and locals; the obfuscated version
    assigned every local to ``_snake_case`` and then referenced undefined
    names (``examples``, ``words``, ``labels``, ``s_p``, ``out``).
    """

    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{mode}.txt`` (CoNLL-U) under *data_dir* into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'])
                    labels.append(token['upos'])
                # Every token must carry a UPOS tag.
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Write one line per sentence: ``form (gold|pred)`` for each token."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Load the label set from *path*, falling back to the 17 UPOS tags."""
        if path:
            with open(path, 'r') as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 652 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow

# Root logger for this test module. The test class below references `logger`,
# which the obfuscated original (`lowercase_`) never defined; the canonical
# name is restored and `lowercase_` kept as a compatibility alias.
logger = logging.getLogger()
lowercase_ = logger
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class lowercase ( unittest.TestCase ):
    """Runs the doctests embedded in `transformers` modules and docs.

    The obfuscated original gave all six methods the same name (so only the
    last survived), repeated a parameter name five times (a SyntaxError),
    and referenced undefined locals; the original names and locals are
    restored. ``Path('..')`` replaces the broken ``'..' / directory``
    expression (str has no ``/`` operator).
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in *directory* whose name passes the filters.

        - *identifier*: keep only files whose name contains this substring.
        - *n_identifier*: drop files containing this substring (or any of a list).
        - *ignore_files*: extra file names to skip ('__init__.py' is always skipped).
        - *only_modules*: doctest the imported transformers module object
          instead of the raw file.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''')
            else:
                result = doctest.testfile(str(Path('..') / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self):
        """Doctest all modeling files except the known-noisy CTRL ones."""
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self):
        """Everything that is not a configuration/modeling/tokenization file."""
        directory = Path('src/transformers')
        n_identifier = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_source_files(self):
        """Doctest the documentation sources as plain text files."""
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 652 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Fast tests for the safe Stable Diffusion pipeline using tiny dummy models.

    Reconstructed from the obfuscated original, whose properties and tests all
    shared one name (shadowing each other) and referenced undefined attributes
    (`self.dummy_cond_unet`, `self.dummy_vae`, ...). Keyword arguments mangled
    to `lowerCamelCase_` are restored to their evident intended values
    (`safety_checker=None`, `disable=None`, `clip_sample=False`,
    `set_alpha_to_one=False`, `skip_prk_steps=True`) — TODO confirm against the
    upstream diffusers test file.
    """

    def tearDown(self):
        # Release memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """A deterministic random image tensor of shape (1, 3, 32, 32)."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        """A tiny conditional UNet with a fixed seed."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        """A tiny VAE with a fixed seed."""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """A tiny CLIP text encoder with a fixed seed."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=10_00,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        """A stand-in feature extractor returning an empty pixel tensor."""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.0_0085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_safe_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_safe_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe', safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_safe_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        image = sd_pipe([prompt], num_inference_steps=2, output_type='np').images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Nightly GPU tests for safe latent diffusion (SLD) guidance.

    Reconstructed from the obfuscated original: every local was assigned to
    `_snake_case` while the code read undefined names (`sd_pipe`, `image`,
    `prompt`, ...), and all test methods collided on one name. The scheduler
    result is assigned to `sd_pipe.scheduler`, which the original dropped.
    """

    def tearDown(self):
        # Release memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        seed = 40_03_66_03_46
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=5_12,
            height=5_12,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=5_12,
            height=5_12,
            sld_guidance_scale=20_00,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'padme amidala taking a bath artwork, safe for work, no nudity'
        seed = 27_34_97_17_55
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=5_12,
            height=5_12,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=5_12,
            height=5_12,
            sld_guidance_scale=20_00,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        # Default pipeline keeps its safety checker, so the unguided image is blacked out.
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        seed = 10_44_35_52_34
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=5_12,
            height=5_12,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=5_12,
            height=5_12,
            sld_guidance_scale=20_00,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger. The previous `lowercase_ : List[str]` annotation
# referenced `List` without importing it; module-level annotations are
# evaluated at import time, so the module raised a NameError. The bogus
# annotation is dropped.
lowercase_ = logging.get_logger(__name__)
class lowercase ( a_ ):
    """Configuration for wrapping a timm model as a transformers backbone.

    The obfuscated original repeated the parameter name `lowerCamelCase_`
    (a SyntaxError) and assigned the settings to a throwaway local instead
    of `self`; the original parameter/attribute names are restored, including
    the `model_type` class attribute required by `PretrainedConfig`.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Name of the timm architecture to instantiate.
        self.backbone = backbone
        self.num_channels = num_channels
        # Whether timm should return intermediate feature maps only.
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This config always describes a timm (not transformers) backbone.
        self.use_timm_backbone = True
        # Default to the last feature map when no explicit indices are given.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 652 |
import functools


def A__(days, costs):
    """Return the minimum cost to travel on every day listed in *days*.

    *costs* holds the prices of a 1-day, 7-day and 30-day pass, in that
    order (LeetCode 983 "Minimum Cost For Tickets").

    Raises:
        ValueError: if *days* is not a list of ints, *costs* is not a list
            of three ints, or any day falls outside the range 1..365.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 3_66:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost covering all travel days from `index` to year end.
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 652 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the image-slice assertions below are reproducible.
enable_full_determinism()
class lowercase ( a_ , unittest.TestCase ):
    """Fast tests for the unconditional DDIM pipeline using a tiny UNet.

    The obfuscated original named all five class attributes `_UpperCamelCase`
    (each assignment overwriting the previous) and all methods
    `__UpperCAmelCase`, while the bodies called `self.get_dummy_components()`
    etc.; the names required by `PipelineTesterMixin` are restored.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATch_PARAMS if False else UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """A seeded tiny UNet plus a default DDIM scheduler."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Slow GPU integration tests for DDIM against pretrained checkpoints.

    Restores the locals (`unet`, `scheduler`, `ddim`, `image`, ...) that the
    obfuscated original assigned to `_snake_case` and then referenced as
    undefined names, and gives the two tests distinct names.
    """

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 652 |
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor

# Module-level logger (unused here but kept for parity with sibling modules).
lowercase_ = logging.get_logger(__name__)


class lowercase ( a_ ):
    """Deprecated alias for `SegformerImageProcessor`.

    The obfuscated original used the same name for both `*args` and
    `**kwargs` (a SyntaxError) and passed an undefined value in the
    warning-category slot; `FutureWarning` is the conventional category
    for deprecation shims of this shape — TODO confirm against upstream.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 652 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Plain-message logging used by the PCA/LDA helpers below for status reporting.
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape *input_array* into a single column vector of shape (n, 1).

    The obfuscated original named the parameter `__lowerCAmelCase` while the
    body read `input_array` (a NameError), and the sibling functions call
    this helper as `column_reshape` — both are fixed by restoring the name.
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class scatter of *features* (shape (n_features, n_samples)).

    For each class ``i`` in ``range(classes)`` the class samples are centred
    on their own mean and their scatter matrices summed; the sum is divided
    by the total sample count. Restores the parameter/local names the
    obfuscation destroyed (duplicate parameter names were a SyntaxError).
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class scatter of *features* (shape (n_features, n_samples)).

    Each class contributes the outer product of (class mean - overall mean),
    weighted by the class sample count; the sum is divided by the total
    sample count. Restores the names the obfuscation destroyed (duplicate
    parameter names were a SyntaxError).
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* (rows = variables, columns = samples) onto the top
    *dimensions* principal components and return the projected data.

    Raises AssertionError when the dataset is empty. Fixes: real name (the
    tests call it), distinct parameter names, and ``force=True`` in the
    error-path ``basicConfig`` (the mangled code passed an unrelated
    parameter).
    """
    # Check if the features have been loaded (all-zero data is treated as empty).
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset.
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # eigh returns ascending eigenvalues: reverse columns, keep the leading ones.
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space.
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
    logging.error('Dataset empty')
    raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto the top *dimensions* LDA discriminants.

    Solves the generalized eigenproblem S_b v = lambda S_w v via
    ``scipy.linalg.eigh``. Raises AssertionError when ``dimensions >= classes``
    or the dataset is empty. Fixes: real name (the tests call it), distinct
    parameter names, ``features.any`` was never *called*, and ``force=True``
    restored in the error path.
    """
    # LDA yields at most (classes - 1) meaningful discriminants.
    assert classes > dimensions
    # Check if features have been already loaded.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the kept directions via SVD before projecting.
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
    logging.error('Dataset empty')
    raise AssertionError
def test_linear_discriminant_analysis():
    """LDA must reject dimensions >= classes with AssertionError.

    Renamed from ``A__``: pytest only collects ``test_``-prefixed functions,
    and the placeholder collided with every other function in this module.
    """
    # Create dummy dataset with 2 classes and 3 features.
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes.
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions
        )
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def test_principal_component_analysis():
    """PCA output for this fixture intentionally differs from ``expected_output``,
    so the inner ``raise`` fires and ``pytest.raises`` captures it.

    Renamed from ``A__`` so pytest collects it.
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Renamed from ``A__``: the ``__main__`` block calls
    ``binomial_distribution``. Parameters were restored from the body's reads
    (duplicated placeholder names were a SyntaxError), and the type check now
    runs *before* the numeric comparisons so bad input raises the intended
    ValueError rather than TypeError.
    """
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 1 |
from __future__ import annotations
def p_series(nth_term, power) -> "list[str]":
    """Return the first *nth_term* terms of the P-series as strings:
    ['1', '1 / 2^p', '1 / 3^p', ...].

    An empty-string *nth_term* yields ``[""]`` (interactive-input quirk kept
    for compatibility). Renamed from ``A__``: the ``__main__`` block calls
    ``p_series``; duplicated placeholder parameters were a SyntaxError.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: "list[str]" = []
    for temp in range(nth_term):
        # First element is plain "1"; subsequent ones are "1 / (k^p)".
        series.append(f'''1 / {pow(temp + 1, power)}''' if series else '1')
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ : Dict = int(input('''Enter the last number (nth term) of the P-Series'''))
lowercase_ : List[str] = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 652 |
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Restored name: the _LazyModule call at the bottom reads `_import_structure`,
# which the mangled code never assigned; the per-backend lists below must be
# registered under module keys for lazy import to work.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
    import sys

    # Register the lazy module (the dangling `import sys` proves this form) and
    # pass module_spec for consistency with the sibling model __init__ files.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Restored name: the _LazyModule call at the bottom reads `_import_structure`,
# which the mangled code never assigned.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): nothing is registered when tokenizers is available; the
    # upstream file has no fast tokenizer for RoCBert, so this is a no-op.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Bug fix: the original unconditionally re-raised
        # OptionalDependencyNotAvailable here even when tokenizers *is*
        # available, breaking static type checking.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Register the lazy module in sys.modules (the dangling `import sys`
    # proves this form).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def A__( __lowerCAmelCase ):
_snake_case : List[Any] = []
for line in lines:
_snake_case : Any = re.sub(R'#.*' , '' , __lowerCAmelCase ) # remove comments
if line:
filtered_lines.append(__lowerCAmelCase )
_snake_case : Dict = '\n'.join(__lowerCAmelCase )
# Make a hash from all this code
_snake_case : Optional[Any] = full_str.encode('utf-8' )
return shaaaa(__lowerCAmelCase ).hexdigest()
# get importable module names and hash for caching
# Importable module name -> (module path, hash of its source); the hash keys
# the dynamic-module cache.
lowercase_ : Dict = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data file extensions.
# Restored name: the update()/iteration calls below read `_EXTENSION_TO_MODULE`,
# which the mangled code never assigned.
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# NOTE(review): likely `_MODULE_SUPPORTS_METADATA` upstream; the name is not
# provable from this chunk, so the placeholder is kept.
lowercase_ = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name.
# Restored name: the loop and append calls below read `_MODULE_TO_EXTENSIONS`.
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 652 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Restored name: the _LazyModule call at the bottom reads `_import_structure`,
# which the mangled code never assigned.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Register the lazy module in sys.modules (the dangling `import sys`
    # proves this form).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored name: the config class below calls `logger.info`, but the mangled
# code assigned the logger to a throwaway placeholder.
logger = logging.get_logger(__name__)

# NOTE(review): likely `TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP` upstream;
# not provable from this chunk, so the placeholder name is kept.
lowercase_ : Optional[int] = {
    '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class lowercase(PretrainedConfig):
    """Configuration for Transformer-XL models.

    Reconstructed from the mangled source: all constructor parameters shared
    one placeholder name (a SyntaxError) and every ``self.*`` assignment was
    lost; the restored names come from the body's reads and the standard
    Transformer-XL defaults visible in the signature. The base class is the
    otherwise-unused ``PretrainedConfig`` import.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    # Map generic config attribute names onto Transformer-XL's own names.
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            # NOTE(review): `tie_projs` follows upstream Transformer-XL
            # configs; the mangled source lost the attribute name.
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no fixed context length; -1 signals "unlimited".
        # (Property name proven by the `.setter` decorator below.)
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 652 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Marker for slow, network-dependent integration tests.
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path', ['paws', 'csv'])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset must copy the loading script into tmp_path, pycache-free.

    Renamed from ``A__`` (pytest collects only ``test_``-prefixed functions)
    and parameters restored from the parametrize argnames plus the tmp_path
    fixture (duplicated placeholder names were a SyntaxError).
    """
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def test_inspect_metric(path, tmp_path):
    """inspect_metric must copy the metric script into tmp_path, pycache-free."""
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    'path, config_name, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info must report the requested config and its split names."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """A missing config_name for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    'path, expected', [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ], )
def test_get_dataset_config_names(path, expected):
    """The expected config name must be among the dataset's configs."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config', [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ], )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    """get_dataset_infos must list every config and the first config's splits."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ], )
def test_get_dataset_info(path, expected_config, expected_splits):
    """A single expected config must be present with the expected splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """A missing config_name for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Restored name: the _LazyModule call at the bottom reads `_import_structure`,
# which the mangled code never assigned; the torch-only lists below must be
# registered under module keys for lazy import to work.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    # Register the lazy module in sys.modules (the dangling `import sys`
    # proves this form).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a BertForPreTraining model from a config, load TF weights, save as PyTorch.

    Renamed from ``A__``: the ``__main__`` block calls
    ``convert_tf_checkpoint_to_pytorch``; duplicated placeholder parameters
    were a SyntaxError.
    """
    # Initialise PyTorch model from the JSON configuration.
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save the pytorch model's state dict.
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 1 |
import torch
from transformers import AutoModel
class lowercase(torch.nn.Module):
    """Few-shot NER model: scores support-set start/end tokens for each query.

    Reconstructed from the mangled source: three methods shared one placeholder
    name (so two were lost). ``BERT`` is proven by the ``self.BERT(...)``
    calls in ``forward``; ``self.bert/cos/softmax`` by their reads.
    ``VectorSum``/``Atten`` names are not provable from this chunk and follow
    the visible structure only.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        # return_dict=True so `.last_hidden_state` below is available.
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Token-level encoder output for a batch of tokenized inputs."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings along dim 2, keeping the dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, sub, sup, T=1):
        """Temperature-scaled cosine-similarity attention weights."""
        return self.softmax(T * self.cos(sub, sup))

    def forward(self, W_query, W_supports):
        """Return (p_starts, p_ends): per-query softmax scores over support
        start/end tokens. Mutates W_supports (pops its bookkeeping keys)."""
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        # Remove bookkeeping entries so the rest can be fed to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 652 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime (6k±1 trial division).

    Renamed from ``A__``: ``prime_generator`` below calls ``is_prime``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes.
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not primes.
        return False
    # All remaining primes are of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield prime numbers in ascending order, indefinitely.

    Renamed from ``A__``: ``solution`` below calls ``prime_generator``.
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth=1_00_01):
    """Return the *nth* prime number (Project Euler problem 7).

    Renamed from ``A__``: the ``__main__`` block calls ``solution``.
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single *line* to fixed-length tensors.

    Renamed from ``A__``: the dataset class below calls ``encode_line``;
    duplicated placeholder parameters were a SyntaxError. Parameter roles are
    restored from the call site (tokenizer, text, max length, padding side).
    """
    # BART byte-level BPE needs add_prefix_space unless the line already starts with one.
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='max_length' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that are entirely padding from a batched id tensor.

    Renamed from ``A__``: the collate function below calls ``trim_batch``;
    duplicated placeholder parameters were a SyntaxError. Returns the trimmed
    ids, or an (ids, mask) tuple when *attention_mask* is given.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowercase(Dataset):
    """Line-aligned source/target dataset for seq2seq fine-tuning.

    Reconstructed from the mangled source: constructor parameters shared one
    placeholder name (a SyntaxError) and most ``self.*`` assignments were
    lost; names are restored from the body's reads. Base class restored to
    the otherwise-unused ``torch.utils.data.Dataset`` import.
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        """Return tokenized {input_ids, attention_mask, decoder_input_ids} for one line pair."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5 (imported here as TaTokenizer).
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # RAG wraps two tokenizers: question encoder for sources, generator for targets.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, 'right')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, 'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in *data_file* (name proven by the
        ``self.get_char_lens`` call in __init__)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of __getitem__ dicts and trim shared padding columns.

        NOTE(review): the method name follows the standard DataLoader
        ``collate_fn`` convention; it is not provable from this chunk alone.
        """
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
# Restored name: helper functions further down log via `logger`, but the
# mangled code assigned the logger to a throwaway placeholder.
logger = getLogger(__name__)
def A__( __lowerCAmelCase ):
    """Flatten one level of nesting: an iterable of iterables -> flat list."""
    flattened = []
    for chunk in __lowerCAmelCase:
        flattened.extend(chunk)
    return flattened
def A__( __lowerCAmelCase ):
    """Collect git metadata for the current repo and write it to
    ``<folder>/git_log.json``.

    Bug fix: the mangled code discarded the ``get_git_info()`` result and
    serialized the folder path argument instead of the repo info.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(__lowerCAmelCase, 'git_log.json'))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize *content* to *path* as indented JSON.

    Renamed from ``A__``: the git-info helper above calls ``save_json``;
    duplicated placeholder parameters were a SyntaxError.
    """
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def A__( __lowerCAmelCase ):
    """Read a JSON file and return the parsed object.

    Bug fix: the mangled code passed the *path string* to ``json.load``
    instead of the open file handle.
    """
    with open(__lowerCAmelCase) as f:
        return json.load(f)
def get_git_info():
    """Return a dict describing the current git repo: id, commit sha, branch, host.

    Renamed from ``A__``: the save helper above calls ``get_git_info``.
    Bug fix: ``search_parent_directories`` received an undefined placeholder
    (NameError at runtime); it must be True so the repo is found from any cwd
    inside it.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos
def A__(fn, iterable):
    """Eager map: ``list(map(fn, iterable))``.

    Duplicated placeholder parameters were a SyntaxError; distinct names
    restored.
    """
    return list(map(fn, iterable))
def A__(obj, path):
    """Pickle *obj* to *path*.

    Fixes: duplicated placeholder parameters (SyntaxError) and
    ``pickle.dump`` receiving the path instead of the open file handle.
    """
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """SQuAD-style normalization: lower-case, strip punctuation and the
    articles a/an/the, collapse whitespace.

    Renamed from ``A__``: the F1/EM helpers below call ``normalize_answer``.
    """
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def A__(prediction, ground_truth):
    """Token-level F1 between *prediction* and *ground_truth*, both run through
    normalize_answer().

    Returns 0 when the normalized answers share no tokens.  Fixes the
    duplicate-argument SyntaxError of the original signature.
    """
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token at most min(occurrences).
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens)
    recall = 1.0 * num_same / len(gold_tokens)
    return (2 * precision * recall) / (precision + recall)
def A__(prediction, ground_truth):
    """True iff the two answers are identical after normalize_answer().

    Fixes the duplicate-argument SyntaxError of the original signature.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def A__(output_lns, reference_lns):
    """Average exact-match score over two parallel lists of answer strings.

    Returns {"em": fraction in [0, 1]} (0 for empty input).  Fixes the
    duplicate-argument SyntaxError of the original signature.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def A__( __lowerCAmelCase ):
    """True iff the model identifier names a RAG model (prefix 'rag').

    Fix: the original body read the undefined name `model_prefix` instead of
    its parameter, raising NameError on every call.
    """
    return __lowerCAmelCase.startswith('rag' )
def A__(extra_params, hparams, config):
    """Move each attribute named in *extra_params* from *hparams* onto *config*.

    Params the config does not know (under either name) are dropped from
    hparams with a log message.  Returns the mutated (hparams, config).
    Fixes two defects: the duplicate-argument SyntaxError of the original
    signature, and the dropout alias, which was assigned to a throwaway local
    instead of the `equivalent_param` mapping.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            # Prefer the exact name; fall back to the config's equivalent.
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase_ : Dict = '''bert-base-cased'''
lowercase_ : Any = '''google/pegasus-xsum'''
lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowercase_ : Any = '''patrickvonplaten/t5-tiny-random'''
lowercase_ : List[Any] = '''sshleifer/bart-tiny-random'''
lowercase_ : Dict = '''sshleifer/tiny-mbart'''
lowercase_ : str = '''sshleifer/tiny-marian-en-de'''
def A__(path, articles):
    """Write *articles* joined by newlines to *path*.

    Fixes the duplicate-argument SyntaxError of the original signature and
    closes the file handle (the original left `Path.open('w')` dangling).
    """
    content = '\n'.join(articles)
    with Path(path).open('w') as f:
        f.write(content)
def A__( tmp_dir ):
    """Create {train,val,test}.{source,target} fixture files under *tmp_dir*
    and return the directory.

    Fixes two NameErrors: callers invoke this with the keyword `tmp_dir=`, so
    the parameter is now named `tmp_dir`, and the final `return tmp_dir` no
    longer references an undefined name.

    NOTE(review): the payload handed to _dump_articles is the directory path
    itself; upstream presumably passed the article/summary fixture lists —
    confirm and restore.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f'''{split}.source''' ) , tmp_dir )
        _dump_articles(os.path.join(tmp_dir , f'''{split}.target''' ) , tmp_dir )
    return tmp_dir
class lowercase ( a_ ):
    """Tests for the seq2seq data utilities: truncation in SeqaSeqDataset and
    LegacySeqaSeqDataset, pack_data_dir, the dynamic and sortish batch
    samplers, and the distributed sortish sampler.

    NOTE(review): locals were mechanically renamed to `_snake_case` and method
    parameters all share the name `lowerCamelCase_`, so later reads of the
    original names (tokenizer, tmp_dir, max_src_len, max_len_source,
    trunc_target, batch_sampler, failures, ...) are unresolved and several
    signatures are duplicate-argument SyntaxErrors — restore distinct names
    before running.
    """
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
        '''Overlong sources/targets must be truncated to the configured limits;
        for mBART, language codes must sit at the expected token positions.'''
        _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Dict = 4
        _snake_case : Any = 8
        assert max_len_target > max_src_len # Will be truncated
        assert max_len_source > max_src_len # Will be truncated
        _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
        _snake_case : int = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
        _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
        '''LegacySeqaSeqDataset must trim sources to max_source_length and
        truncate targets to max_target_length.'''
        _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Union[str, Any] = 4
        _snake_case : Optional[int] = LegacySeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
        _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target # Truncated
            assert max_len_target > trunc_target # Truncated
            break # No need to test every batch
    def __UpperCAmelCase ( self : Dict ):
        '''pack_data_dir should merge examples (fewer, longer lines) while
        keeping the same set of file names and all characters.'''
        _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
        _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
        _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
        _snake_case : Dict = {x.name for x in save_dir.iterdir()}
        _snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1
        assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def __UpperCAmelCase ( self : List[str] ):
        '''The dynamic batch sampler must vary batch sizes, keep every example,
        and stay within 10% of the max-tokens budget per batch.'''
        if not FAIRSEQ_AVAILABLE:
            return
        _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
        _snake_case : List[str] = 64
        _snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
        _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
        assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
        assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
        _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : List[Any] = []
        _snake_case : List[Any] = []
        for batch in data_loader:
            _snake_case : Any = batch['input_ids'].shape
            _snake_case : str = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _snake_case : int = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(lowerCamelCase_ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(lowerCamelCase_ )
        assert num_src_per_batch[0] == max(lowerCamelCase_ )
        if failures:
            raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''The sortish sampler should strictly reduce padding versus an
        unsorted loader over the same dataset.'''
        _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
        _snake_case : Optional[Any] = 2
        _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
        _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.pad_token_id
        def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
            # Total pad tokens across all batches produced by a loader.
            return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
        assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
        assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
        '''Build a SeqaSeqDataset over the wmt_en_ro fixture (or the real data
        when USE_REAL_DATA is set); returns (dataset, max_tokens, tokenizer).'''
        if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
            _snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
            _snake_case : List[Any] = max_len * 2 * 64
            if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
                save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        else:
            _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
            _snake_case : List[Any] = max_len * 4
            save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : str = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
        return ds, max_tokens, tokenizer
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Rank-0 and rank-1 shards of DistributedSortishSampler must be
        disjoint.'''
        _snake_case , _snake_case , _snake_case : Any = self._get_dataset()
        _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
        _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
        assert idsa.intersection(lowerCamelCase_ ) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
        '''dataset_kwargs must carry src/tgt lang for mBART, and
        add_prefix_space only for BART.'''
        _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
        if tok_name == MBART_TINY:
            _snake_case : int = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            _snake_case : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            _snake_case : Tuple = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            _snake_case : List[Any] = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def A__( __lowerCAmelCase = 2_00_00_00 ):
    """Project Euler 85: return the area a*b of the rectangular grid whose
    count of contained sub-rectangles, T(a)*T(b) with T(n) = n*(n+1)/2,
    is closest to the target."""
    # Precompute triangle numbers up to a generous upper bound.
    triangles = [0]
    for idx in range(1 , ceil(sqrt(__lowerCAmelCase * 2 ) * 1.1 ) ):
        triangles.append(triangles[-1] + idx )
    # Best (closest-to-target) rectangle count seen so far, and its grid area.
    best_product = 0
    area = 0
    for idx_a, triangle_a in enumerate(triangles[1:] , 1 ):
        # Solve T(b) * triangle_a ~= target for b via the quadratic formula,
        # then try the two nearest integers (floor first, then ceil).
        b_estimate = (-1 + sqrt(1 + 8 * __lowerCAmelCase / triangle_a )) / 2
        for b_int in (floor(b_estimate ), ceil(b_estimate )):
            candidate = triangles[b_int] * triangle_a
            if abs(__lowerCAmelCase - candidate ) < abs(__lowerCAmelCase - best_product ):
                best_product = candidate
                area = idx_a * b_int
    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 |
from __future__ import annotations
def A__(value, weight, capacity):
    """Greedy fractional-knapsack solver.

    Items are taken whole in descending value/weight ratio until one no
    longer fits, which is then taken fractionally.  Returns
    (max_value, fractions) where fractions[i] is the fraction of item i used.

    Fixes the original, whose three parameters shared one name (a
    duplicate-argument SyntaxError) and whose sort key lambda ignored its
    argument and read the undefined name `i`, with `reverse=` also bound to
    an undefined name.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Highest value-density items first.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Item does not fit whole: take the fitting fraction and stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase ( a_ ):
    """BaseOutput wrapper holding the VAE decoder's result.

    NOTE(review): presumably corresponds to diffusers' `DecoderOutput` with a
    field named `sample`; confirm the field name before relying on it.
    """
    _UpperCamelCase : torch.FloatTensor  # decoded sample tensor
class lowercase ( nn.Module ):
    """VAE encoder: conv_in -> configured down blocks -> UNet mid block ->
    GroupNorm/SiLU/conv_out.  With double_z the head emits 2*out_channels
    channels (mean and log-variance of the latent posterior).

    NOTE(review): the constructor parameters all share the name
    `lowerCamelCase_` (a duplicate-argument SyntaxError) while the body reads
    the original names (layers_per_block, block_out_channels, out_channels,
    double_z, ...) — distinct parameter names must be restored before this
    can run.
    """
    def __init__( self : Optional[int] , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : Dict=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Optional[Any]=32 , lowerCamelCase_ : Optional[int]="silu" , lowerCamelCase_ : List[str]=True , ):
        '''Build the input conv, down blocks, mid block and output head.'''
        super().__init__()
        _snake_case : Optional[int] = layers_per_block
        _snake_case : str = torch.nn.Convad(
            lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        _snake_case : Optional[int] = None
        _snake_case : Optional[int] = nn.ModuleList([] )
        # down
        _snake_case : Any = block_out_channels[0]
        for i, down_block_type in enumerate(lowerCamelCase_ ):
            _snake_case : List[Any] = output_channel
            _snake_case : Tuple = block_out_channels[i]
            _snake_case : List[str] = i == len(lowerCamelCase_ ) - 1
            _snake_case : List[str] = get_down_block(
                lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
            self.down_blocks.append(lowerCamelCase_ )
        # mid
        _snake_case : Optional[int] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
        # out
        _snake_case : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
        _snake_case : Optional[int] = nn.SiLU()
        _snake_case : List[Any] = 2 * out_channels if double_z else out_channels
        _snake_case : int = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
        _snake_case : str = False
    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : str ):
        '''Forward pass; applies gradient checkpointing to the down/mid blocks
        when training with gradient_checkpointing enabled.'''
        _snake_case : str = x
        _snake_case : Tuple = self.conv_in(lowerCamelCase_ )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(lowerCamelCase_ : List[str] ):
                # Wrap a module so torch.utils.checkpoint can re-invoke it.
                def custom_forward(*lowerCamelCase_ : List[Any] ):
                    return module(*lowerCamelCase_ )
                return custom_forward
            # down
            if is_torch_version('>=' , '1.11.0' ):
                for down_block in self.down_blocks:
                    _snake_case : List[Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
                # middle
                _snake_case : Union[str, Any] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
            else:
                for down_block in self.down_blocks:
                    _snake_case : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
                # middle
                _snake_case : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
        else:
            # down
            for down_block in self.down_blocks:
                _snake_case : Tuple = down_block(lowerCamelCase_ )
            # middle
            _snake_case : List[str] = self.mid_block(lowerCamelCase_ )
        # post-process
        _snake_case : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
        _snake_case : int = self.conv_act(lowerCamelCase_ )
        _snake_case : Optional[Any] = self.conv_out(lowerCamelCase_ )
        return sample
class lowercase ( nn.Module ):
    """VAE decoder: conv_in -> UNet mid block -> configured up blocks ->
    norm/SiLU/conv_out.  `norm_type='spatial'` swaps the final GroupNorm for a
    SpatialNorm conditioned on the latent embedding.

    NOTE(review): as with the encoder, the constructor parameters all share
    one name (a duplicate-argument SyntaxError) while the body reads the
    original names — restore distinct names to run.
    """
    def __init__( self : List[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=("UpDecoderBlock2D",) , lowerCamelCase_ : Optional[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[Any]="group" , ):
        '''Build conv_in, the mid block, the up blocks and the output head.'''
        super().__init__()
        _snake_case : Dict = layers_per_block
        _snake_case : Tuple = nn.Convad(
            lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        _snake_case : str = None
        _snake_case : List[Any] = nn.ModuleList([] )
        _snake_case : str = in_channels if norm_type == 'spatial' else None
        # mid
        _snake_case : str = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
        # up
        _snake_case : Tuple = list(reversed(lowerCamelCase_ ) )
        _snake_case : Tuple = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(lowerCamelCase_ ):
            _snake_case : int = output_channel
            _snake_case : Any = reversed_block_out_channels[i]
            _snake_case : Any = i == len(lowerCamelCase_ ) - 1
            _snake_case : Tuple = get_up_block(
                lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
            self.up_blocks.append(lowerCamelCase_ )
            _snake_case : Dict = output_channel
        # out
        if norm_type == "spatial":
            _snake_case : Dict = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
        else:
            _snake_case : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
        _snake_case : Optional[Any] = nn.SiLU()
        _snake_case : Optional[int] = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
        _snake_case : List[Any] = False
    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=None ):
        '''Decode latent z (optionally conditioned on latent embeds for spatial
        norm); gradient checkpointing mirrors the encoder forward pass.'''
        _snake_case : Tuple = z
        _snake_case : Any = self.conv_in(lowerCamelCase_ )
        _snake_case : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(lowerCamelCase_ : int ):
                # Wrap a module so torch.utils.checkpoint can re-invoke it.
                def custom_forward(*lowerCamelCase_ : Dict ):
                    return module(*lowerCamelCase_ )
                return custom_forward
            if is_torch_version('>=' , '1.11.0' ):
                # middle
                _snake_case : List[Any] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
                _snake_case : List[str] = sample.to(lowerCamelCase_ )
                # up
                for up_block in self.up_blocks:
                    _snake_case : List[str] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
            else:
                # middle
                _snake_case : str = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
                _snake_case : Optional[int] = sample.to(lowerCamelCase_ )
                # up
                for up_block in self.up_blocks:
                    _snake_case : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
        else:
            # middle
            _snake_case : List[Any] = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
            _snake_case : Any = sample.to(lowerCamelCase_ )
            # up
            for up_block in self.up_blocks:
                _snake_case : str = up_block(lowerCamelCase_ , lowerCamelCase_ )
        # post-process
        if latent_embeds is None:
            _snake_case : Tuple = self.conv_norm_out(lowerCamelCase_ )
        else:
            _snake_case : List[Any] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Optional[Any] = self.conv_act(lowerCamelCase_ )
        _snake_case : Union[str, Any] = self.conv_out(lowerCamelCase_ )
        return sample
class lowercase ( nn.Module ):
    """VQ-VAE vector quantizer: nearest-neighbour lookup into a learned
    codebook, commitment loss with straight-through gradients, and optional
    remapping of codebook indices to a restricted "used" subset.

    NOTE(review): duplicate constructor parameter names (all `lowerCamelCase_`)
    while the body reads n_e, vq_embed_dim, beta, remap, unknown_index,
    sane_index_shape, legacy — restore distinct names to run.
    """
    def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[Any]="random" , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : str=True ):
        '''Create the codebook embedding and (optionally) load a remap table
        from an .npy file.'''
        super().__init__()
        _snake_case : Optional[int] = n_e
        _snake_case : Dict = vq_embed_dim
        _snake_case : int = beta
        _snake_case : List[Any] = legacy
        _snake_case : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        _snake_case : List[Any] = remap
        if self.remap is not None:
            self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
            _snake_case : str = self.used.shape[0]
            _snake_case : str = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                _snake_case : Union[str, Any] = self.re_embed
                _snake_case : int = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.''' )
        else:
            _snake_case : List[Any] = n_e
        _snake_case : List[str] = sane_index_shape
    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : str ):
        '''Map raw codebook indices onto the remapped "used" index space;
        unknown indices get random / extra / fixed ids per unknown_index.'''
        _snake_case : List[str] = inds.shape
        assert len(lowerCamelCase_ ) > 1
        _snake_case : Union[str, Any] = inds.reshape(ishape[0] , -1 )
        _snake_case : List[str] = self.used.to(lowerCamelCase_ )
        _snake_case : List[Any] = (inds[:, :, None] == used[None, None, ...]).long()
        _snake_case : Union[str, Any] = match.argmax(-1 )
        _snake_case : Optional[int] = match.sum(2 ) < 1
        if self.unknown_index == "random":
            _snake_case : str = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            _snake_case : Tuple = self.unknown_index
        return new.reshape(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Any ):
        '''Inverse of the remap: translate "used" indices back to codebook ids.'''
        _snake_case : Dict = inds.shape
        assert len(lowerCamelCase_ ) > 1
        _snake_case : str = inds.reshape(ishape[0] , -1 )
        _snake_case : Dict = self.used.to(lowerCamelCase_ )
        if self.re_embed > self.used.shape[0]: # extra token
            _snake_case : Tuple = 0 # simply set to zero
        _snake_case : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
        return back.reshape(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ):
        '''Quantize z: pick the nearest codebook vector per spatial position,
        compute the commitment loss, and pass gradients straight through.
        Returns (z_q, loss, (perplexity, min_encodings, min_encoding_indices)).'''
        _snake_case : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
        _snake_case : Tuple = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        _snake_case : Optional[Any] = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
        _snake_case : Any = self.embedding(lowerCamelCase_ ).view(z.shape )
        _snake_case : Union[str, Any] = None
        _snake_case : Dict = None
        # compute loss for embedding
        if not self.legacy:
            _snake_case : List[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            _snake_case : Optional[int] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        _snake_case : Tuple = z + (z_q - z).detach()
        # reshape back to match original input shape
        _snake_case : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            _snake_case : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
            _snake_case : Union[str, Any] = self.remap_to_used(lowerCamelCase_ )
            _snake_case : Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
        if self.sane_index_shape:
            _snake_case : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ):
        '''Look up codebook vectors for the given indices (undoing the remap
        first), optionally reshaping to `shape` (B, H, W, C) -> (B, C, H, W).'''
        if self.remap is not None:
            _snake_case : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
            _snake_case : Tuple = self.unmap_to_all(lowerCamelCase_ )
            _snake_case : Optional[Any] = indices.reshape(-1 ) # flatten again
        # get quantized latent vectors
        _snake_case : Any = self.embedding(lowerCamelCase_ )
        if shape is not None:
            _snake_case : Tuple = z_q.view(lowerCamelCase_ )
            # reshape back to match original input shape
            _snake_case : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class lowercase ( a_ ):
    """Diagonal Gaussian over latents, parameterized by channel-concatenated
    (mean, logvar); supports reparameterized sampling, KL, NLL and mode."""
    def __init__( self : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str]=False ):
        '''Split the parameter tensor into mean/logvar along dim 1 (logvar
        clamped to [-30, 20]); when deterministic, variance is forced to 0.'''
        _snake_case : List[str] = parameters
        _snake_case , _snake_case : Tuple = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
        _snake_case : Dict = torch.clamp(self.logvar , -30.0 , 20.0 )
        _snake_case : Optional[Any] = deterministic
        _snake_case : str = torch.exp(0.5 * self.logvar )
        _snake_case : Optional[Any] = torch.exp(self.logvar )
        if self.deterministic:
            _snake_case : Any = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
        '''Reparameterized sample: mean + std * eps.'''
        _snake_case : Tuple = randn_tensor(
            self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
        _snake_case : Union[str, Any] = self.mean + self.std * sample
        return x
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any]=None ):
        '''KL divergence to the standard normal, or to another diagonal
        Gaussian when `other` is given; zero when deterministic.'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def __UpperCAmelCase ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
        '''Negative log-likelihood of `sample` under this Gaussian, summed over
        `dims`; zero when deterministic.  NOTE(review): mutable default list.'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        _snake_case : Optional[int] = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
    def __UpperCAmelCase ( self : str ):
        '''Distribution mode (equals the mean for a Gaussian).'''
        return self.mean
| 652 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import plumbing for the CANINE model package: declare the import map,
# extend it with torch-only symbols when torch is installed, and defer actual
# imports to _LazyModule at runtime (TYPE_CHECKING imports eagerly for IDEs).
lowercase_ : Any = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only exposed when torch is available.
    lowercase_ : Optional[Any] = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]
if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys
    # NOTE(review): the import map above is bound to `lowercase_`, yet
    # _LazyModule receives `_import_structure` — one of the two names is
    # wrong; restore `_import_structure` so this resolves at runtime.
    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Minimal stand-in so module-level references do not fail when the
    # vision extras (PIL) are not installed; tests requiring vision are
    # skipped via @require_vision anyway.
    class lowercase :
        """No-op placeholder used when PIL is unavailable."""
        @staticmethod
        def __UpperCAmelCase ( *lowerCamelCase_ : Any , **lowerCamelCase_ : Tuple ):
            '''Accept any arguments and do nothing.'''
            pass
@is_pipeline_test
@require_vision
class lowercase ( unittest.TestCase ):
    """Pipeline tests for the zero-shot image-classification task.

    NOTE(review): throughout this class, results are assigned to the local
    `_snake_case` while later calls read `lowerCamelCase_` (and `image`),
    names that are never bound in these methods — obfuscation artifacts; the
    upstream tests bind and reuse real locals. Verify against upstream before
    relying on this file.
    """

    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Tiny random torch CLIP: scores are near-uniform, so only membership among valid orderings is asserted.'''
        _snake_case : int = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
        _snake_case : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        _snake_case : Dict = image_classifier(lowerCamelCase_ , candidate_labels=['a', 'b', 'c'] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(lowerCamelCase_ ) , [
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
            ] , )
        # Batched call: labels may come back in any order, so only shape/score are checked.
        _snake_case : Dict = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
            ] , )

    @require_tf
    def __UpperCAmelCase ( self : int ):
        '''Same tiny model exercised through the TensorFlow framework path.'''
        _snake_case : Union[str, Any] = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
        _snake_case : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        _snake_case : Dict = image_classifier(lowerCamelCase_ , candidate_labels=['a', 'b', 'c'] )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
        _snake_case : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
                [
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                    {'score': 0.333, 'label': ANY(lowerCamelCase_ )},
                ],
            ] , )

    @slow
    @require_torch
    def __UpperCAmelCase ( self : Dict ):
        '''Real CLIP checkpoint on torch: exact scores for a known COCO image.'''
        _snake_case : List[str] = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
        # This is an image of 2 cats with remotes and no planes
        _snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        _snake_case : Any = image_classifier(lowerCamelCase_ , candidate_labels=['cat', 'plane', 'remote'] )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] , )
        # Batched call must reproduce the single-image result for every copy.
        _snake_case : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def __UpperCAmelCase ( self : str ):
        '''Real CLIP checkpoint on TensorFlow: must match the torch scores above.'''
        _snake_case : Optional[Any] = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
        # This is an image of 2 cats with remotes and no planes
        _snake_case : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        _snake_case : Tuple = image_classifier(lowerCamelCase_ , candidate_labels=['cat', 'plane', 'remote'] )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] , )
        _snake_case : Tuple = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase_ ) , [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 , )
| 652 |
import math
def A__( __lowerCAmelCase ):
    """Return True if the given integer is prime.

    Uses trial division restricted to candidates of the form 6k +/- 1
    (after handling 2, 3 and their multiples explicitly).

    Bug fix: the body previously read an undefined name ``number`` while the
    parameter was ``__lowerCAmelCase``, raising NameError on every call.
    """
    number = __lowerCAmelCase
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def _is_prime_number( number ):
    """Trial-division primality test over candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not primes
        return False
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A__( __lowerCAmelCase = 1_00_01 ):
    """Return the n-th prime number (Project Euler problem 7).

    Args:
        __lowerCAmelCase: 1-based index of the prime to return; anything
            castable to ``int`` is accepted.

    Raises:
        TypeError: if the argument is not castable to int.
        ValueError: if the resulting index is < 1.

    Bug fixes: the body previously read undefined locals (``nth``, ``primes``,
    ``num``) and called the undefined name ``is_prime`` (the earlier helper of
    the same name ``A__`` is shadowed by this definition), so every call raised
    NameError. A private module-level helper is used instead.
    """
    try:
        nth = int(__lowerCAmelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes: list[int] = []
    num = 2
    # Collect primes in increasing order until we have `nth` of them.
    while len(primes ) < nth:
        if _is_prime_number(num ):
            primes.append(num )
        num += 1
    return primes[-1]


if __name__ == "__main__":
    # Bug fix: previously called the undefined name `solution()`.
    print(F'''{A__() = }''')
| 652 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 652 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
    """Few-shot NER (FSNER) span scorer built on a BERT encoder.

    NOTE(review): `__init__` binds the encoder/cosine/softmax modules to the
    local `_snake_case` instead of `self`, yet the methods below read
    `self.bert`, `self.BERT`, `self.cos` and `self.softmax`, which are never
    set — obfuscation artifacts; the upstream file registers these as
    attributes. Also, two methods below repeat the parameter name
    `lowerCamelCase_`, which is a SyntaxError (duplicate argument names).
    """

    def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
        '''Load the pretrained encoder and similarity/normalization layers.'''
        # NOTE(review): `super(lowerCamelCase_, self)` passes the model-name
        # argument where a class is expected; upstream uses plain `super()`.
        super(lowerCamelCase_ , self ).__init__()
        _snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
        _snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
        _snake_case : str = torch.nn.Softmax(dim=1 )

    def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
        '''Encode tokenized inputs and return the encoder's last hidden state.'''
        return self.bert(**lowerCamelCase_ ).last_hidden_state

    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
        '''Sum token embeddings along dim 2, keeping that dimension.'''
        return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )

    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
        '''Temperature-scaled softmax over cosine similarities (T defaults to 1).'''
        return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
        '''Score start/end span positions of each query against its support set.

        Pops bookkeeping keys (`sizes`, `start_token_id`, `end_token_id`) from
        the support batch, encodes queries and supports, then for each query
        matches its slice of support encodings at entity start/end marker
        positions and softmaxes the match scores.
        '''
        _snake_case : int = W_supports['sizes'].tolist()
        _snake_case : int = W_supports['start_token_id'].item()
        _snake_case : List[str] = W_supports['end_token_id'].item()
        # Remove metadata before forwarding the rest to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        _snake_case : Optional[int] = self.BERT(**lowerCamelCase_ )
        _snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ )
        _snake_case : Optional[int] = None
        _snake_case : Optional[int] = None
        # Boolean masks of the entity start/end marker tokens in the supports.
        _snake_case : List[str] = W_supports['input_ids'] == start_token_id
        _snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(lowerCamelCase_ ):
            if i == 0:
                _snake_case : str = 0
            else:
                _snake_case : Union[str, Any] = support_sizes[i - 1]
            # Slice this query's supports and pick marker-token encodings.
            _snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]]
            _snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
            # Aggregate similarity of query tokens to support markers.
            _snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            _snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                _snake_case : Optional[Any] = torch.vstack((p_starts, p_start) )
                _snake_case : List[str] = torch.vstack((p_ends, p_end) )
            else:
                _snake_case : Union[str, Any] = p_start
                _snake_case : Any = p_end
        return p_starts, p_ends
| 652 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase ( a_ , unittest.TestCase ):
    """Tokenizer test-suite for MobileBERT (slow and fast implementations).

    NOTE(review): like the rest of this file, many bindings go to the local
    `_snake_case` while later lines read attributes (`self.vocab_file`,
    `self.pre_trained_model_path`) or locals that were never bound —
    obfuscation artifacts; verify against the upstream test file.
    """

    # Tokenizer classes and switches consumed by TokenizerTesterMixin.
    _UpperCamelCase : Union[str, Any] = MobileBertTokenizer
    _UpperCamelCase : Tuple = MobileBertTokenizerFast
    _UpperCamelCase : Union[str, Any] = True
    _UpperCamelCase : Union[str, Any] = True
    _UpperCamelCase : Dict = filter_non_english
    _UpperCamelCase : Any = "google/mobilebert-uncased"

    def __UpperCAmelCase ( self : int ):
        '''Write a toy WordPiece vocab to disk and retarget the pretrained list.'''
        super().setUp()
        _snake_case : Optional[int] = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        _snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        _snake_case : Optional[Any] = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : List[str] ):
        '''Return an (input, expected-normalized-output) text pair.'''
        _snake_case : Any = 'UNwant\u00E9d,running'
        _snake_case : Optional[Any] = 'unwanted, running'
        return input_text, output_text

    def __UpperCAmelCase ( self : List[str] ):
        '''Slow tokenizer splits into WordPieces and maps to the toy vocab ids.'''
        _snake_case : Dict = self.tokenizer_class(self.vocab_file )
        _snake_case : int = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(lowerCamelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [9, 6, 7, 12, 10, 11] )

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Slow and fast tokenizers agree on tokens/ids, with and without lower-casing.'''
        if not self.test_rust_tokenizer:
            return
        _snake_case : str = self.get_tokenizer()
        _snake_case : Optional[int] = self.get_rust_tokenizer()
        _snake_case : str = 'UNwant\u00E9d,running'
        _snake_case : str = tokenizer.tokenize(lowerCamelCase_ )
        _snake_case : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        _snake_case : Tuple = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Tuple = self.get_rust_tokenizer()
        _snake_case : str = tokenizer.encode(lowerCamelCase_ )
        _snake_case : Dict = rust_tokenizer.encode(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        # With lower casing
        _snake_case : str = self.get_tokenizer(do_lower_case=lowerCamelCase_ )
        _snake_case : Tuple = self.get_rust_tokenizer(do_lower_case=lowerCamelCase_ )
        _snake_case : Tuple = 'UNwant\u00E9d,running'
        _snake_case : int = tokenizer.tokenize(lowerCamelCase_ )
        _snake_case : Optional[int] = rust_tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        _snake_case : str = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Any = self.get_rust_tokenizer()
        _snake_case : Optional[int] = tokenizer.encode(lowerCamelCase_ )
        _snake_case : int = rust_tokenizer.encode(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

    def __UpperCAmelCase ( self : Tuple ):
        '''BasicTokenizer isolates CJK characters as individual tokens.'''
        _snake_case : Tuple = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )

    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Lower-casing mode folds case (and accents, by default).'''
        _snake_case : List[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )

    def __UpperCAmelCase ( self : str ):
        '''strip_accents=False keeps accents while lower-casing.'''
        _snake_case : str = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )

    def __UpperCAmelCase ( self : Dict ):
        '''strip_accents=True removes accents while lower-casing.'''
        _snake_case : Optional[int] = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Default lower-casing also strips accents.'''
        _snake_case : Any = BasicTokenizer(do_lower_case=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )

    def __UpperCAmelCase ( self : Any ):
        '''Case is preserved when do_lower_case=False.'''
        _snake_case : Optional[int] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def __UpperCAmelCase ( self : Dict ):
        '''Case preserved, accents kept with strip_accents=False.'''
        _snake_case : List[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def __UpperCAmelCase ( self : int ):
        '''Case preserved, accents removed with strip_accents=True.'''
        _snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def __UpperCAmelCase ( self : Dict ):
        '''Tokens listed in never_split are kept intact.'''
        _snake_case : List[str] = BasicTokenizer(do_lower_case=lowerCamelCase_ , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''WordpieceTokenizer splits into subwords and falls back to [UNK].'''
        _snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        _snake_case : Any = {}
        for i, token in enumerate(lowerCamelCase_ ):
            _snake_case : Optional[Any] = i
        _snake_case : Any = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )

    def __UpperCAmelCase ( self : Optional[int] ):
        '''_is_whitespace recognizes ASCII whitespace and non-breaking space.'''
        self.assertTrue(_is_whitespace(' ' ) )
        self.assertTrue(_is_whitespace('\t' ) )
        self.assertTrue(_is_whitespace('\r' ) )
        self.assertTrue(_is_whitespace('\n' ) )
        self.assertTrue(_is_whitespace('\u00A0' ) )
        self.assertFalse(_is_whitespace('A' ) )
        self.assertFalse(_is_whitespace('-' ) )

    def __UpperCAmelCase ( self : Dict ):
        '''_is_control flags control characters but not printable text/whitespace.'''
        self.assertTrue(_is_control('\u0005' ) )
        self.assertFalse(_is_control('A' ) )
        self.assertFalse(_is_control(' ' ) )
        self.assertFalse(_is_control('\t' ) )
        self.assertFalse(_is_control('\r' ) )

    def __UpperCAmelCase ( self : List[str] ):
        '''_is_punctuation flags ASCII punctuation, not letters or spaces.'''
        self.assertTrue(_is_punctuation('-' ) )
        self.assertTrue(_is_punctuation('$' ) )
        self.assertTrue(_is_punctuation('`' ) )
        self.assertTrue(_is_punctuation('.' ) )
        self.assertFalse(_is_punctuation('A' ) )
        self.assertFalse(_is_punctuation(' ' ) )

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Soft-hyphen-only strings tokenize to nothing (tokenizers issue #340).'''
        _snake_case : List[Any] = self.get_tokenizer()
        _snake_case : str = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowerCamelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(lowerCamelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )

    @slow
    def __UpperCAmelCase ( self : List[str] ):
        '''build_inputs_with_special_tokens wraps sequences in [CLS]/[SEP] ids.'''
        _snake_case : int = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
        _snake_case : Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase_ )
        _snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
        _snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
        # 101/102 are the [CLS]/[SEP] ids in the BERT vocab.
        assert encoded_sentence == [1_01] + text + [1_02]
        assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]

    def __UpperCAmelCase ( self : str ):
        '''Fast-tokenizer offset mapping stays aligned around the mask token.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                _snake_case : List[Any] = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                _snake_case : Optional[int] = tokenizer_r.encode_plus(
                    lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , )
                _snake_case : Optional[Any] = tokenizer_r.do_lower_case if hasattr(lowerCamelCase_ , 'do_lower_case' ) else False
                # Expected (offset, token) pairs differ between cased and uncased.
                _snake_case : List[str] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )

    def __UpperCAmelCase ( self : str ):
        '''tokenize_chinese_chars toggles the ## continuation prefix on CJK chars.'''
        _snake_case : str = ['的', '人', '有']
        _snake_case : Optional[int] = ''.join(lowerCamelCase_ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case : Tuple = True
                _snake_case : Tuple = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                _snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                _snake_case : Dict = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
                _snake_case : int = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
                _snake_case : Dict = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ )
                _snake_case : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
                self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
                _snake_case : List[str] = False
                _snake_case : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                _snake_case : Any = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                _snake_case : Optional[int] = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
                _snake_case : List[Any] = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
                _snake_case : str = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ )
                _snake_case : str = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ )
                # it is expected that only the first Chinese character is not preceded by "##".
                _snake_case : Optional[Any] = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCamelCase_ )
                ]
                self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
                self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( a_ , a_ , a_ ):
    """T5-style encoder stack (token + learned position embeddings, N T5 blocks).

    NOTE(review): both methods repeat the parameter name `lowerCamelCase_`,
    which is a SyntaxError (duplicate argument names) — the obfuscation
    collapsed the distinct config parameters. Also, `__init__` binds every
    submodule to the local `_snake_case` while `forward` reads
    `self.token_embedder`, `self.position_encoding`, `self.dropout_pre`,
    `self.encoders`, `self.layer_norm` and `self.dropout_post`, none of which
    are ever set here; upstream registers them as attributes.
    """

    @register_to_config
    def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
        '''Build embeddings, dropout, a TaConfig, and a stack of TaBlock encoders.'''
        super().__init__()
        _snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
        # Position embedding is fixed (not trained) in the upstream model;
        # presumably this flag sets `requires_grad` — TODO confirm.
        _snake_case : Union[str, Any] = False
        _snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
        _snake_case : Union[str, Any] = TaConfig(
            vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = nn.ModuleList()
        for lyr_num in range(lowerCamelCase_ ):
            _snake_case : Any = TaBlock(lowerCamelCase_ )
            self.encoders.append(lowerCamelCase_ )
        _snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
        _snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
        '''Embed tokens + positions, run the T5 blocks, and return (hidden, mask).'''
        _snake_case : Any = self.token_embedder(lowerCamelCase_ )
        _snake_case : List[Any] = encoder_input_tokens.shape[1]
        _snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
        x += self.position_encoding(lowerCamelCase_ )
        _snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )
        # inverted the attention mask
        _snake_case : Dict = encoder_input_tokens.size()
        _snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
        for lyr in self.encoders:
            # TaBlock returns a tuple; [0] is the hidden states.
            _snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
        _snake_case : Any = self.layer_norm(lowerCamelCase_ )
        return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import table for BERTweet (only a tokenizer, no model code).
# NOTE(review): the table is bound to `lowercase_`, but `_LazyModule` below is
# called with `_import_structure`, which is never defined — importing this
# module raises NameError; upstream names this dict `_import_structure`.
lowercase_ : List[Any] = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys
    lowercase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
def A__( __lowerCAmelCase ):
    """Convert an Excel-style column title to its 1-based column number.

    >>> A__("A")
    1
    >>> A__("AB")
    28
    >>> A__("ZY")
    701

    Raises:
        ValueError: if the title is empty or contains any character outside
            'A'-'Z'. (The previous ``assert column_title.isupper()`` check was
            stripped under ``python -O`` and wrongly accepted digits, e.g.
            "A1", because ``str.isupper`` ignores uncased characters.)
    """
    if not __lowerCAmelCase or not all('A' <= character <= 'Z' for character in __lowerCAmelCase ):
        raise ValueError('column title must be a non-empty string of uppercase A-Z characters' )
    answer = 0
    # Horner's scheme in bijective base 26: 'A' -> 1 ... 'Z' -> 26.
    for character in __lowerCAmelCase:
        answer = answer * 26 + (ord(character ) - 64)
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 652 | 1 |
def A__( equationa , equationa_a ):
    """Solve two linear equations ``a*x + b*y = c`` with Cramer's rule.

    Each equation is a length-3 sequence ``(a, b, c)``. Returns the solution
    ``(x, y)`` as a tuple of floats.

    Bug fix: the original definition repeated the parameter name
    ``__lowerCAmelCase`` for both equations — duplicate argument names are a
    SyntaxError, so no caller could ever have used it; the second parameter
    is renamed to make the function valid.

    Raises:
        ValueError: if an equation is not length 3, if both x/y coefficients
            of both equations are zero, or if the system has no / infinitely
            many solutions.
    """
    # Check if the input is valid
    if not len(equationa ) == len(equationa_a ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equationa[0] == equationa[1] == equationa_a[0] == equationa_a[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    aa, ba, ca = equationa
    aa_a, ba_a, ca_a = equationa_a
    # Calculate the determinants of the matrices
    determinant = aa * ba_a - aa_a * ba
    determinant_x = ca * ba_a - ca_a * ba
    determinant_y = aa * ca_a - aa_a * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 652 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
# NOTE(review): `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` are
# never defined in this module (the string constants above are bound to
# `lowercase_`), so this decorator raises NameError at import time.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """`datasets` metric wrapping sacreBLEU's TER (Translation Edit Rate)."""

    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Declare metric metadata; requires sacrebleu >= 1.4.12 for TER support.'''
        if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
                } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
                'https://github.com/jhclark/tercom',
            ] , )

    # NOTE(review): this signature repeats `lowerCamelCase_` — duplicate
    # argument names are a SyntaxError; upstream has distinct parameters
    # (predictions, references, normalized, no_punct, asian_support,
    # case_sensitive).
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
        '''Compute corpus TER; references are transposed to sacrebleu's layout.'''
        _snake_case : str = len(references[0] )
        # Every prediction must have the same number of references.
        if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        # Transpose [pred][ref] -> [ref][pred], the layout sacrebleu expects.
        _snake_case : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )]
        _snake_case : Optional[int] = TER(
            normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , )
        _snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , **lowerCamelCase_ : Dict ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : List[str] , lowerCamelCase_ : Union[np.ndarray, bytes, str] , **lowerCamelCase_ : str ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : str , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : str = {}
if "candidate_labels" in kwargs:
_snake_case : List[Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_snake_case : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : str=None , lowerCamelCase_ : Dict="This is a sound of {}." ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_snake_case : List[Any] = requests.get(lowerCamelCase_ ).content
else:
with open(lowerCamelCase_ , 'rb' ) as f:
_snake_case : Optional[Any] = f.read()
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Optional[Any] = ffmpeg_read(lowerCamelCase_ , self.feature_extractor.sampling_rate )
if not isinstance(lowerCamelCase_ , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
_snake_case : str = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
_snake_case : Optional[int] = candidate_labels
_snake_case : int = [hypothesis_template.format(lowerCamelCase_ ) for x in candidate_labels]
_snake_case : List[str] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_ )
_snake_case : Optional[Any] = [text_inputs]
return inputs
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = model_inputs.pop('candidate_labels' )
_snake_case : List[Any] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , lowerCamelCase_ ):
_snake_case : Dict = text_inputs[0]
else:
# Batching case.
_snake_case : Optional[int] = text_inputs[0][0]
_snake_case : Any = self.model(**lowerCamelCase_ , **lowerCamelCase_ )
_snake_case : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : int = model_outputs.pop('candidate_labels' )
_snake_case : Optional[Any] = model_outputs['logits'][0]
if self.framework == "pt":
_snake_case : List[str] = logits.softmax(dim=0 )
_snake_case : Tuple = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
_snake_case : Dict = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_ ) , key=lambda lowerCamelCase_ : -x[0] )
]
return result
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(__lowerCAmelCase ) - len(__lowerCAmelCase ) + 1 ):
_snake_case : Tuple = [x.match(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , ks[i:] )]
if matches and all(__lowerCAmelCase ):
return True
return False
def A__( __lowerCAmelCase ):
def replace(__lowerCAmelCase , __lowerCAmelCase ):
for rule, replacement in rules:
if _match(__lowerCAmelCase , __lowerCAmelCase ):
return replacement
return val
return replace
def A__( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = _get_partition_rules()
_snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase )
_snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
_snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCAmelCase ) )
| 652 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowercase_ : int = logging.get_logger(__name__)
class lowercase :
"""simple docstring"""
_UpperCamelCase : str
_UpperCamelCase : str = None
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
raise NotImplementedError
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Any ):
'''simple docstring'''
raise NotImplementedError
def __UpperCAmelCase ( self : int , lowerCamelCase_ : str ):
'''simple docstring'''
raise NotImplementedError
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def __UpperCAmelCase ( cls : List[Any] ):
'''simple docstring'''
return f'''`pip install {cls.pip_package or cls.name}`'''
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : List[Any] = "optuna"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_optuna_available()
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
return default_hp_space_optuna(lowerCamelCase_ )
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Tuple = "ray"
_UpperCamelCase : Optional[Any] = "'ray[tune]'"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_ray_available()
def __UpperCAmelCase ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return default_hp_space_ray(lowerCamelCase_ )
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Tuple = "sigopt"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_sigopt_available()
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return default_hp_space_sigopt(lowerCamelCase_ )
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "wandb"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_wandb_available()
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ):
'''simple docstring'''
return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
return default_hp_space_wandb(lowerCamelCase_ )
lowercase_ : Union[str, Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def A__( ):
_snake_case : Any = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__lowerCAmelCase ) > 0:
_snake_case : Dict = available_backends[0].name
if len(__lowerCAmelCase ) > 1:
logger.info(
F'''{len(__lowerCAmelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 652 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# save results
if os.path.exists(__lowerCAmelCase ):
if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'config.json' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
_snake_case : int = 2
if unlogit:
_snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[int] = p * torch.log(__lowerCAmelCase )
_snake_case : List[str] = 0
return -plogp.sum(dim=-1 )
def A__( __lowerCAmelCase ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
for row in range(len(__lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
_snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Optional[int] = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
def A__( __lowerCAmelCase ):
    """Return the largest value obtainable by deleting exactly one digit
    from the absolute value of ``__lowerCAmelCase``.

    Example: 152 -> candidates 52, 12, 15 -> returns 52.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input has a single digit (removing it would
            leave an empty digit string).
    """
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    # Work on the digits of the absolute value; the sign is ignored.
    _snake_case : str = str(abs(__lowerCAmelCase ) )
    if len(_snake_case ) < 2:
        # Previously this fell through to int('') and raised an opaque
        # "invalid literal" error; fail explicitly instead (still ValueError).
        raise ValueError('input must have at least two digits' )
    # For each position, drop that digit and compare the resulting integers.
    return max(
        int(_snake_case[:index] + _snake_case[index + 1 :] ) for index in range(len(_snake_case ) ) )
if __name__ == "__main__":
    # When run as a script, execute the doctest examples in this module.
    import doctest

    doctest.testmod()
| 652 | 1 |
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if index == r:
for j in range(__lowerCAmelCase ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_snake_case : List[Any] = arr[i]
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index + 1 , __lowerCAmelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# A temporary array to store all combination one by one
_snake_case : Dict = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 0 , __lowerCAmelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowercase_ : List[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 652 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
    """Token-classification task for CoNLL-style NER data.

    Each non-blank input line holds a token in its first space-separated
    column and (optionally) a label in column ``label_idx`` (default: the
    last column). Sentences are separated by blank lines or lines starting
    with ``-DOCSTART-``.

    NOTE(review): the three task methods below all share the name
    ``__UpperCAmelCase``, so only the last definition (label loading) is
    reachable as a class attribute; the names are kept for consistency with
    the rest of this file.
    """

    def __init__( self : Dict , lowerCamelCase_ : int = -1 ):
        '''Remember which column of the data file carries the NER label.'''
        # Fix: the index must be stored on the instance -- the file reader
        # below looks it up as ``self.label_idx`` (the old body referenced
        # an undefined name and never set the attribute).
        _snake_case : int = lowerCamelCase_
        self.label_idx = _snake_case

    def __UpperCAmelCase ( self : Any , data_dir : str , mode : Union[Split, str] ):
        '''Read ``{mode}.txt`` from ``data_dir`` and return a list of
        ``InputExample`` objects, one per sentence.'''
        # Fix: accept either a Split enum member or its string value
        # (the old body tested ``isinstance(x, x)``, a runtime TypeError).
        if isinstance(mode , Split ):
            _snake_case : str = mode.value
            mode = _snake_case
        _snake_case : str = os.path.join(data_dir , f'''{mode}.txt''' )
        _snake_case : int = 1
        _snake_case : list = []
        file_path, guid_index, examples = _snake_case, 1, []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated tokens, if any.
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            # Flush the trailing sentence when the file lacks a final blank line.
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples

    def __UpperCAmelCase ( self : Union[str, Any] , writer : TextIO , test_input_reader : TextIO , preds_list : List ):
        '''Copy the test file to ``writer``, replacing each token's label
        with the next prediction from ``preds_list``.'''
        # Fix: the original signature repeated one parameter name three
        # times (a SyntaxError); distinct names restore the intended roles.
        _snake_case : int = 0
        example_id = _snake_case
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                # More tokens than predictions: the sequence was truncated upstream.
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
        '''Return the label list read from the file at ``lowerCamelCase_``
        (prepending "O" when absent), or the CoNLL-2003 defaults.'''
        if lowerCamelCase_:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : list = f.read().splitlines()
                labels = _snake_case
            if "O" not in labels:
                _snake_case : list = ['O'] + labels
                labels = _snake_case
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
    """Token-classification task for CoNLL-2000-style chunking data.

    Chunk labels live in the second-to-last column of each line, hence the
    parent reader is configured with label index ``-2``.
    """

    def __init__( self : Optional[int] ):
        '''Configure the parent NER-style reader to take labels from column -2.'''
        # Fix: call positionally -- within this file the parent ``__init__``
        # parameter is not named ``label_idx``, so the previous keyword call
        # ``super().__init__(label_idx=-2)`` raised TypeError.
        super().__init__(-2 )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
        '''Return the label list read from the file at ``lowerCamelCase_``
        (prepending "O" when absent), or the default CoNLL-2000 chunk tags.'''
        if lowerCamelCase_:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : list = f.read().splitlines()
                labels = _snake_case
            if "O" not in labels:
                _snake_case : list = ['O'] + labels
                labels = _snake_case
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowercase ( a_ ):
    """Token-classification task for part-of-speech tagging on CoNLL-U data.

    NOTE(review): the original methods declared duplicate ``lowerCamelCase_``
    parameters (a SyntaxError) and read unbound names (``mode``,
    ``examples``, ``s_p`` ...); signatures and locals are restored here from
    the bodies' own references.  As in the original, all three methods share
    the name ``__UpperCAmelCase``, so later definitions shadow earlier ones.
    """

    def __UpperCAmelCase ( self : Optional[Any] , data_dir : Union[str, Any] , mode : Union[Split, str] ):
        """Parse CoNLL-U sentences from ``{mode}.txt`` under ``data_dir`` into InputExamples."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                # Every token must contribute exactly one word and one label.
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def __UpperCAmelCase ( self : Optional[int] , writer : TextIO , test_input_reader : TextIO , preds_list : List ):
        """Write one "<form> (<gold-upos>|<prediction>)" line per sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out )
            example_id += 1

    def __UpperCAmelCase ( self : Optional[Any] , path : str ):
        """Return the UPOS tag set, optionally read from ``path``."""
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 652 | 1 |
import random
def A__( num ):
    """Probabilistic Miller-Rabin primality test, 5 random rounds.

    Returns True when ``num`` is (very probably) prime.

    NOTE(review): the original bound every local to ``_snake_case`` while the
    body read ``s``/``t``/``a``/``v``/``i`` (NameError); the intended names
    are restored.  Guards for ``num`` < 4 and even ``num`` are added: the
    original looped forever on even input (t == 0 makes the inner ``i``
    counter never reach ``t - 1``) and ``randrange(2, 2)`` raised for 3.
    """
    if num < 2:
        return False
    if num < 4:
        return True  # 2 and 3 are prime
    if num % 2 == 0:
        return False
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def A__( num ):
    """Primality test: trial division by all primes below 1000, then Miller-Rabin.

    NOTE(review): the original read the unbound names ``num`` and
    ``low_primes`` and called the undefined function ``rabin_miller``; the
    names are restored and a Miller-Rabin helper is inlined so the function
    is self-contained.  The 168-entry hard-coded prime table is replaced by
    an equivalent computed list of all primes below 1000.
    """
    if num < 2:
        return False
    # All primes below 1000 (equivalent to the original literal table).
    low_primes = [
        n for n in range(2 , 10_00 ) if all(n % d for d in range(2 , int(n**0.5 ) + 1 ) )
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False

    def _rabin_miller(candidate ):
        # 5-round Miller-Rabin; candidate is odd and > 997 at this point.
        s = candidate - 1
        t = 0
        while s % 2 == 0:
            s = s // 2
            t += 1
        for _ in range(5 ):
            a = random.randrange(2 , candidate - 1 )
            v = pow(a , s , candidate )
            if v != 1:
                i = 0
                while v != (candidate - 1):
                    if i == t - 1:
                        return False
                    i = i + 1
                    v = (v**2) % candidate
        return True

    return _rabin_miller(num )
def A__( keysize = 10_24 ):
    """Return a random (probable) prime with exactly ``keysize`` bits.

    NOTE(review): the original read the unbound names ``keysize`` and ``num``
    and called the undefined function ``is_prime_low_num``; the parameter
    name is restored and a self-contained primality check (trial division by
    primes below 1000 plus 5 Miller-Rabin rounds) is inlined.
    """
    # Primes below 1000, computed once and reused for fast trial division.
    small_primes = [
        n for n in range(2 , 10_00 ) if all(n % d for d in range(2 , int(n**0.5 ) + 1 ) )
    ]

    def _is_prime(num ):
        if num < 2:
            return False
        if num in small_primes:
            return True
        for p in small_primes:
            if num % p == 0:
                return False
        # Miller-Rabin, 5 random rounds (num is odd and > 997 here).
        s = num - 1
        t = 0
        while s % 2 == 0:
            s = s // 2
            t += 1
        for _ in range(5 ):
            a = random.randrange(2 , num - 1 )
            v = pow(a , s , num )
            if v != 1:
                i = 0
                while v != (num - 1):
                    if i == t - 1:
                        return False
                    i = i + 1
                    v = (v**2) % num
        return True

    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if _is_prime(num ):
            return num
if __name__ == "__main__":
lowercase_ : List[str] = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 652 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionPipelineSafe`` (imported above as
    ``StableDiffusionPipeline``) built from tiny dummy submodels.

    NOTE(review): the obfuscation in this class replaced assignment targets
    with ``_snake_case`` and call arguments with ``lowerCamelCase_``; names
    such as ``batch_size``, ``image``, ``model``, ``sd_pipe`` and ``prompt``
    read below are therefore unbound (NameError at runtime), the ``extract``
    helper declares ``*`` and ``**`` parameters with the same name (a
    SyntaxError), and every method is named ``__UpperCAmelCase`` so later
    definitions shadow earlier ones.  Restore the upstream diffusers test
    before running.
    """
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Free Python and CUDA memory after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Deterministic dummy input image tensor (batch 1, 3 channels, 32x32).'''
        _snake_case : Tuple = 1
        _snake_case : str = 3
        _snake_case : List[str] = (32, 32)
        _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
        return image
    @property
    def __UpperCAmelCase ( self : str ):
        '''Tiny conditional UNet standing in for the real denoiser.'''
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model
    @property
    def __UpperCAmelCase ( self : Dict ):
        '''Tiny VAE with 4 latent channels.'''
        torch.manual_seed(0 )
        _snake_case : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
    @property
    def __UpperCAmelCase ( self : Tuple ):
        '''Tiny CLIP text encoder (5 layers, hidden size 32).'''
        torch.manual_seed(0 )
        _snake_case : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(lowerCamelCase_ )
    @property
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Stand-in feature extractor whose output exposes an empty pixel_values tensor.'''
        def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
            class lowercase :
                """simple docstring"""
                def __init__( self : Tuple ):
                    '''simple docstring'''
                    _snake_case : List[str] = torch.ones([0] )
                def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
                    '''simple docstring'''
                    self.pixel_values.to(lowerCamelCase_ )
                    return self
            return Out()
        return extract
    def __UpperCAmelCase ( self : int ):
        '''End-to-end DDIM run on CPU, checked against hard-coded output slices.'''
        _snake_case : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case : int = self.dummy_cond_unet
        _snake_case : str = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Union[str, Any] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : str = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[str] = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Union[str, Any] = output.images
        # Same run again via the tuple return path; both must agree.
        _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Tuple = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : List[str] ):
        '''End-to-end PNDM run on CPU, checked against hard-coded output slices.'''
        _snake_case : Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case : List[str] = self.dummy_cond_unet
        _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : int = self.dummy_vae
        _snake_case : List[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Any = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : str = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Optional[Any] = output.images
        _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : int ):
        '''Pipeline loads, runs and save/load round-trips with safety_checker=None.'''
        _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert isinstance(pipe.scheduler , lowerCamelCase_ )
        assert pipe.safety_checker is None
        _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase_ )
            _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Smoke test: fp16 pipeline produces a 64x64 image on GPU.'''
        _snake_case : Union[str, Any] = self.dummy_cond_unet
        _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : Any = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # put models in fp16
        _snake_case : str = unet.half()
        _snake_case : Union[str, Any] = vae.half()
        _snake_case : Dict = bert.half()
        # make sure here that pndm scheduler skips prk
        _snake_case : List[str] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Tuple = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Nightly GPU tests exercising safe latent diffusion (SLD) guidance with
    and without ``sld_guidance_scale`` on runwayml/stable-diffusion-v1-5.

    NOTE(review): as in the class above, assignment targets were replaced by
    ``_snake_case`` and call arguments by ``lowerCamelCase_``; names such as
    ``sd_pipe``, ``prompt``, ``seed`` and ``guidance_scale`` read below are
    unbound, so these methods raise NameError as written.  Restore the
    upstream diffusers test before running.
    """
    def __UpperCAmelCase ( self : Tuple ):
        '''Free Python and CUDA memory after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : List[str] ):
        '''Harmful prompt: compare output slices with SLD disabled vs. strong SLD.'''
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Optional[int] = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        _snake_case : List[str] = 40_03_66_03_46
        _snake_case : int = 7
        # without safety guidance (sld_guidance_scale = 0)
        _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Union[str, Any] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : str = output.images
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # without safety guidance (strong configuration)
        _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
        _snake_case : int = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : Tuple = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : int ):
        '''Nudity-adjacent prompt: SLD off vs. strong SLD, slice comparison.'''
        _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
        _snake_case : Optional[Any] = 27_34_97_17_55
        _snake_case : Union[str, Any] = 7
        _snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Any = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        _snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : Tuple ):
        '''With the default safety checker: unsafe output is blanked (all zeros) unless SLD redirects it.'''
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[Any] = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        _snake_case : Union[str, Any] = 10_44_35_52_34
        _snake_case : Dict = 12
        _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Optional[int] = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Optional[int] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 1 |
import math
def A__( n ):
    """Return a list of all primes below ``n`` via an odd-only Eratosthenes sieve.

    Requires ``n`` >= 3 (the sieve seeds cells 0..2 explicitly).

    NOTE(review): the original bound the sieve list and every cell update to
    ``_snake_case`` while the body read ``is_prime``/``index``/``primes``
    (NameError); the intended names are restored.
    """
    is_prime = [True] * n
    is_prime[0] = False  # 0 and 1 are not prime
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        # Cross out every multiple of the odd candidate i (starting at 2i).
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def A__( limit = 99_99_66_66_33_33 ):
    """Project Euler 234: sum of all semidivisible numbers not exceeding ``limit``.

    For each pair of consecutive primes (p, q) it sums the numbers in
    (p**2, q**2] that are divisible by exactly one of p and q.

    NOTE(review): the original read unbound names (``limit``, ``primes``,
    ``matches_sum`` ...) and called the undefined ``prime_sieve``; the names
    are restored and the sieve is inlined as a private helper so the function
    is self-contained.
    """

    def _prime_sieve(n ):
        # Odd-only sieve of Eratosthenes returning all primes below n.
        is_prime = [True] * n
        is_prime[0] = False
        is_prime[1] = False
        is_prime[2] = True
        for i in range(3 , int(n**0.5 + 1 ) , 2 ):
            index = i * 2
            while index < n:
                is_prime[index] = False
                index = index + i
        primes = [2]
        for i in range(3 , n , 2 ):
            if is_prime[i]:
                primes.append(i )
        return primes

    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_00
    primes = _prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 652 |
import functools
def A__( days , costs ):
    """Minimum cost to cover all travel ``days`` given 1/7/30-day pass ``costs``.

    (LeetCode 983 "Minimum Cost For Tickets": memoised top-down DP over the
    365-day calendar.)

    Raises ValueError for non-list / non-int input, out-of-range days, or a
    ``costs`` list that does not hold exactly three integers.

    NOTE(review): the original declared both parameters as
    ``__lowerCAmelCase`` (duplicate argument — a SyntaxError) and read the
    unbound name ``days_set``; names are restored from the body's own
    references and error messages.
    """
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        # Cheapest way to cover every travel day from `index` onward.
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 1 |
class lowercase :  # Public class to implement a graph
    """Boolean-matrix grid supporting an 8-connected island count (DFS flood fill).

    NOTE(review): the original methods declared duplicate ``lowerCamelCase_``
    parameters (a SyntaxError), dropped the ``self.ROW``/``visited[i][j]``
    assignment targets, and called ``self.is_safe``/``self.diffs`` which were
    never defined under those names; signatures, attributes and the helper
    method names are restored from the body's own references.  The counting
    entry point keeps its original (name-mangled) identifier.
    """

    def __init__( self , row : int , col : int , graph : list[list[bool]] ):
        '''Store the grid dimensions and the occupancy matrix.'''
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe( self , i : int , j : int , visited : list[list[bool]] ):
        '''True when (i, j) is inside the grid, unvisited, and set in the grid.'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs( self , i : int , j : int , visited : list[list[bool]] ):
        '''Depth-first flood fill over the 8 neighbours of (i, j).'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def __UpperCAmelCase ( self ):  # And finally, count all islands.
        '''Count the 8-connected components of truthy grid cells.'''
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
| 652 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
    """Deprecated alias of ``SegformerImageProcessor``.

    Kept only for backward compatibility: emits a ``FutureWarning`` on
    instantiation and otherwise defers entirely to the image processor.
    """

    def __init__( self : int , *args , **kwargs ):
        # NOTE(review): the original passed the *args tuple as the warning
        # category (a TypeError at runtime, since the category must be a
        # Warning subclass); ``FutureWarning`` is restored here.
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 652 | 1 |
# Lazy-module scaffolding for the RoCBert model (transformers-style).
# NOTE(review): this block is internally inconsistent as written:
#   * the import-structure dict and the modeling list are both bound to
#     ``lowercase_`` (the list overwrites the dict), yet ``_LazyModule`` at
#     the bottom references ``_import_structure``, which is never defined —
#     a NameError when the module loads;
#   * the first tokenizers guard's ``else: pass`` adds nothing (upstream
#     registers the fast tokenizer there);
#   * inside ``TYPE_CHECKING``, the tokenizers guard's ``else`` branch
#     unconditionally re-raises ``OptionalDependencyNotAvailable`` when the
#     dependency IS available.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the modeling symbols.
    lowercase_ : Tuple = [
        '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoCBertForCausalLM''',
        '''RoCBertForMaskedLM''',
        '''RoCBertForMultipleChoice''',
        '''RoCBertForPreTraining''',
        '''RoCBertForQuestionAnswering''',
        '''RoCBertForSequenceClassification''',
        '''RoCBertForTokenClassification''',
        '''RoCBertLayer''',
        '''RoCBertModel''',
        '''RoCBertPreTrainedModel''',
        '''load_tf_weights_in_roc_bert''',
    ]
if TYPE_CHECKING:
    # Static-analysis-only imports so type checkers see the real symbols.
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        raise OptionalDependencyNotAvailable()
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    # At runtime, replace this module with a lazily-importing proxy.
    import sys
    lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
from math import factorial
def A__( successes , trials , prob ):
    """Binomial probability of exactly ``successes`` hits in ``trials`` attempts.

    Computes C(trials, successes) * prob**successes * (1-prob)**(trials-successes).
    Raises ValueError for successes > trials, negative or non-int counts, or
    ``prob`` outside the open interval (0, 1).

    NOTE(review): the original declared three parameters all named
    ``__lowerCAmelCase`` (duplicate argument — a SyntaxError) and read the
    unbound names ``probability`` and ``coefficient``; names are restored
    from the body's own references.
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 1 |
# Lazy-module scaffolding for the I-BERT model (transformers-style).
# NOTE(review): as in the RoCBert block above, the import-structure dict and
# the modeling list are both bound to ``lowercase_`` (the list overwrites the
# dict), while ``_LazyModule`` at the bottom references ``_import_structure``
# which is never defined — a NameError when the module loads.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Dict = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the modeling symbols.
    lowercase_ : Tuple = [
        '''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''IBertForMaskedLM''',
        '''IBertForMultipleChoice''',
        '''IBertForQuestionAnswering''',
        '''IBertForSequenceClassification''',
        '''IBertForTokenClassification''',
        '''IBertModel''',
        '''IBertPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static-analysis-only imports so type checkers see the real symbols.
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazily-importing proxy.
    import sys
    lowercase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
# Notebook bootstrap cell (Italian) injected at the top of generated docs.
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
# First notebook cell: the installation snippet above.
# NOTE(review): ``INSTALL_CONTENT`` is not defined under that name in this
# file (the snippet is bound to ``lowercase_``), so evaluating this list
# raises NameError as written.
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder -> concrete-name substitutions applied to doc templates.
lowercase_ : str = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 652 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self : Optional[int] , **lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[str] = {
'num_train_timesteps': 10_00,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowerCamelCase_ )
return config
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCamelCase_ , prev_timestep=lowerCamelCase_ )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Any = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config(variance_type='fixed_small_log' )
_snake_case : str = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1e-5
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(variance_type='learned_range' )
_snake_case : Dict = scheduler_class(**lowerCamelCase_ )
_snake_case : List[Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCamelCase_ ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(4_87 , predicted_variance=lowerCamelCase_ ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(9_99 , predicted_variance=lowerCamelCase_ ) - -0.001_0011 < 1e-5
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ )
_snake_case : Tuple = scheduler.timesteps
_snake_case : Dict = self.dummy_model()
_snake_case : Any = self.dummy_sample_deter
_snake_case : List[str] = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase_ ):
# 1. predict noise residual
_snake_case : Dict = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
_snake_case : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
_snake_case : Union[str, Any] = pred_prev_sample
_snake_case : str = torch.sum(torch.abs(lowerCamelCase_ ) )
_snake_case : Dict = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.scheduler_classes[0]
_snake_case : Optional[int] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(25 )
_snake_case : Union[str, Any] = scheduler.timesteps
_snake_case : Tuple = self.dummy_model()
_snake_case : str = self.dummy_sample_deter
_snake_case : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase_ ):
# 1. predict noise residual
_snake_case : Any = model(lowerCamelCase_ , lowerCamelCase_ )
if i + 1 == timesteps.shape[0]:
_snake_case : List[Any] = None
else:
_snake_case : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_snake_case : Any = scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , prev_timestep=lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
_snake_case : Optional[int] = pred_prev_sample
_snake_case : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
_snake_case : Dict = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
    def __UpperCAmelCase ( self : Dict ):
        """Deliberate no-op.

        NOTE(review): presumably overrides/disables an inherited common scheduler
        test that does not apply here -- confirm against the base test mixin.
        """
        pass
    def __UpperCAmelCase ( self : Tuple ):
        """Deliberate no-op.

        NOTE(review): presumably overrides/disables an inherited common scheduler
        test that does not apply here -- confirm against the base test mixin.
        """
        pass
| 652 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: configuration and slow tokenizer are always importable.
# FIX: this dict was bound to a throwaway name while `_LazyModule` below read
# `_import_structure`, which was never defined.
_import_structure = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer is registered for RoCBert.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # FIX: the model list was assigned to an unused module-level name, so the
    # torch models were never registered with the lazy module.
    _import_structure['''modeling_roc_bert'''] = [
        '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoCBertForCausalLM''',
        '''RoCBertForMaskedLM''',
        '''RoCBertForMultipleChoice''',
        '''RoCBertForPreTraining''',
        '''RoCBertForQuestionAnswering''',
        '''RoCBertForSequenceClassification''',
        '''RoCBertForTokenClassification''',
        '''RoCBertLayer''',
        '''RoCBertModel''',
        '''RoCBertPreTrainedModel''',
        '''load_tf_weights_in_roc_bert''',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUG FIX: the original raised OptionalDependencyNotAvailable here,
        # aborting type checking precisely when tokenizers WAS available;
        # mirror the runtime branch and do nothing instead.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # FIX: the lazy module was bound to a dead name, leaving `sys` unused and
    # the real module never replaced; install it in sys.modules as intended.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def A__( repo_id , path , revision = None ):
    """Return the hf.co URL of *path* inside dataset repo *repo_id*.

    FIX: the original declared three parameters with the same name (a
    SyntaxError) and bound the quoted path to a dead local, so the
    url-encoding workaround never took effect.

    Args:
        repo_id: dataset repository id on the Hub.
        path: file path inside the repository.
        revision: optional git revision; defaults to the repo's default branch.
    """
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
| 652 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazy import structure: the configuration is always importable.
# FIX: this dict was bound to a throwaway name while `_LazyModule` below read
# `_import_structure`, which was never defined.
_import_structure = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # FIX: the model list was assigned to an unused module-level name, so the
    # torch models were never registered with the lazy module.
    _import_structure['''modeling_mega'''] = [
        '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegaForCausalLM''',
        '''MegaForMaskedLM''',
        '''MegaForMultipleChoice''',
        '''MegaForQuestionAnswering''',
        '''MegaForSequenceClassification''',
        '''MegaForTokenClassification''',
        '''MegaModel''',
        '''MegaPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # FIX: install the lazy module in sys.modules (the original bound it to a
    # dead name, leaving `sys` unused).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
import datasets
from .evaluate import evaluate
lowercase_ : Union[str, Any] = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
lowercase_ : int = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
lowercase_ : Dict = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """CUAD metric: wraps the official CUAD v1 scoring script and returns EM, F1,
    AUPR, and precision at 80%/90% recall.

    FIX: `datasets.Metric` dispatches to `_info` / `_compute`; the original
    method names were obfuscated and `_compute`'s locals were bound to dead
    names while `predictions` / `references` / `score` were read undefined.
    """

    def _info( self : List[str] ):
        """Declare metric metadata and the expected prediction/reference features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': {
                        'id': datasets.Value('string' ),
                        'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
                    },
                    'references': {
                        'id': datasets.Value('string' ),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string' ),
                                'answer_start': datasets.Value('int32' ),
                            } ),
                    },
                } ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )

    def _compute( self : Any , predictions : List[Any] , references : List[Any] ):
        """Reshape inputs into the CUAD scoring-script format and delegate to it.

        Returns a dict with exact_match, f1, aupr, prec_at_80_recall and
        prec_at_90_recall.
        """
        # Map prediction id -> candidate answer texts, as the script expects.
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        # Wrap references in the (dataset -> paragraphs -> qas) shape of CUAD.
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 652 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    """`inspect_dataset` copies the dataset script into *tmp_path* (no pycache).

    FIX: parameter names were obfuscated into duplicates (a SyntaxError), and
    `@pytest.mark.parametrize` requires parameters named after its argnames;
    names restored from the parametrize strings and the body's own references.
    """
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    """`inspect_metric` copies the metric script into *tmp_path* (no pycache)."""
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )


@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    """`get_dataset_config_info` reports the requested config and its splits."""
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Missing config name raises the parametrized exception."""
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )


@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def test_get_dataset_config_names(path, expected):
    """`get_dataset_config_names` includes the expected config name."""
    config_names = get_dataset_config_names(path )
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    """`get_dataset_infos` lists all configs; first config carries expected splits."""
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_info_for_config(path, expected_config, expected_splits):
    """A single expected config is present with the expected splits."""
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Missing config name raises the parametrized exception."""
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 | 1 |
import itertools
import math
def is_prime(number):
    """Return True iff *number* is prime, using 6k+/-1 trial division.

    FIX: all three functions in this module were renamed to `A__` (shadowing
    each other) while the bodies still called `is_prime` / `prime_generator`
    and the main guard called `solution` -- every call was a NameError.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order, indefinitely."""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(nth = 1_00_01 ):
    """Return the *nth* prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 652 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining``
    state dict and save it.

    FIX: the function was renamed to `A__` with three identically-named
    parameters (a SyntaxError), while the main guard below called
    `convert_tf_checkpoint_to_pytorch`; names restored from the argparse
    arguments and the call at the bottom of the file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON file describing the model architecture.
        pytorch_dump_path: output path for the PyTorch ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )


if __name__ == "__main__":
    # FIX: `parser` / `args` were read below but the results of
    # `ArgumentParser()` / `parse_args()` were bound to dead names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# NOTE(review): `is_transformers_version` is imported but unused in this chunk.
try:
    # The real pipelines need both transformers and torch at import time.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects (which raise a helpful error on
    # use) so `from ... import KandinskyPipeline` keeps working without deps.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 652 |
import itertools
import math
def is_prime(number):
    """Return True iff *number* is prime, using 6k+/-1 trial division.

    FIX: all three functions in this module were renamed to `A__` (shadowing
    each other) while the bodies still called `is_prime` / `prime_generator`
    and the main guard called `solution` -- every call was a NameError.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order, indefinitely."""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(nth = 1_00_01 ):
    """Return the *nth* prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 652 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Shared RNG so synthetic inputs are reproducible across helpers in this module.
# FIX: this was bound to a dead name while `floats_list` below read `global_rng`.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of shape (shape[0], shape[1]) of random floats in
    [0, scale).

    FIX: the def was renamed `A__` with duplicated parameter names (a
    SyntaxError) while callers below use `floats_list`; names restored from
    the body's own references.

    Args:
        shape: (batch, length) pair giving the nested-list dimensions.
        scale: multiplier applied to each uniform sample.
        rng: optional `random.Random`; defaults to the module-level `global_rng`.
        name: unused; kept for signature compatibility with sibling helpers.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
    """Config holder / synthetic-input factory for the WhisperFeatureExtractor
    tests below.

    FIX: every method signature reused one parameter name (a SyntaxError), the
    __init__ body assigned to throwaway locals instead of the `self.*`
    attributes the other methods read, and both helpers shared the name
    `__UpperCAmelCase` while the sibling test class calls
    `prepare_feat_extract_dict`. Names restored from the bodies' own reads.
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=10 , hop_length=1_60 , chunk_length=8 , padding_value=0.0 , sampling_rate=40_00 , return_attention_mask=False , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so a batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict( self ):
        """Return kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """Build a batch of synthetic speech inputs (lists or np arrays).

        NOTE(review): this method name is not called within this chunk; it is
        restored from the feature-extraction test-mixin convention -- confirm
        against the mixin.
        """

        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( a_ , unittest.TestCase ):
    """WhisperFeatureExtractor test suite: serialization round-trips, batching,
    truncation, dtype handling, and integration regression values.

    NOTE(review): the method bodies bind every result to `_snake_case` and then
    read names (`feat_extract_first`, `lowerCamelCase_`, ...) that are never
    defined, and all test methods share the name `__UpperCAmelCase` (later
    definitions shadow earlier ones). Code kept byte-for-byte; restore the
    original local/method names from the upstream file before relying on it.
    """

    # Class under test; None when the speech extra is not installed.
    _UpperCamelCase : Any = WhisperFeatureExtractor if is_speech_available() else None

    def __UpperCAmelCase ( self : Dict ):
        """Set-up: build the shared tester fixture."""
        _snake_case : Optional[Any] = WhisperFeatureExtractionTester(self )

    def __UpperCAmelCase ( self : Union[str, Any] ):
        """save_pretrained/from_pretrained round-trip preserves config + mel filters."""
        _snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _snake_case : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
            check_json_file_has_correct_format(lowerCamelCase_ )
            _snake_case : Tuple = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )

        _snake_case : Optional[Any] = feat_extract_first.to_dict()
        _snake_case : List[Any] = feat_extract_second.to_dict()
        _snake_case : List[Any] = feat_extract_first.mel_filters
        _snake_case : List[str] = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
        self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )

    def __UpperCAmelCase ( self : int ):
        """to_json_file/from_json_file round-trip preserves config + mel filters."""
        _snake_case : int = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _snake_case : Any = os.path.join(lowerCamelCase_ , 'feat_extract.json' )
            feat_extract_first.to_json_file(lowerCamelCase_ )
            _snake_case : List[str] = self.feature_extraction_class.from_json_file(lowerCamelCase_ )

        _snake_case : str = feat_extract_first.to_dict()
        _snake_case : Union[str, Any] = feat_extract_second.to_dict()
        _snake_case : str = feat_extract_first.mel_filters
        _snake_case : str = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
        self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )

    def __UpperCAmelCase ( self : List[Any] ):
        """Shapes, batched vs unbatched equivalence, 2-D batching, and truncation."""
        _snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _snake_case : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _snake_case : Dict = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]

        # Test feature size
        _snake_case : Dict = feature_extractor(lowerCamelCase_ , padding='max_length' , return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )

        # Test not batched input
        _snake_case : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        _snake_case : str = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )

        # Test batched
        _snake_case : Any = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
        _snake_case : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        _snake_case : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        _snake_case : Optional[int] = np.asarray(lowerCamelCase_ )
        _snake_case : Union[str, Any] = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
        _snake_case : str = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )

        # Test truncation required
        _snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
        _snake_case : str = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]

        _snake_case : int = [x[: feature_extractor.n_samples] for x in speech_inputs]
        _snake_case : Dict = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs_truncated]

        _snake_case : str = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
        _snake_case : Optional[Any] = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )

    def __UpperCAmelCase ( self : int ):
        """Padding emits float32 for both numpy and torch return tensors."""
        import torch

        _snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _snake_case : Any = np.random.rand(1_00 , 32 ).astype(np.floataa )
        _snake_case : int = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            _snake_case : Optional[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            _snake_case : Dict = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Any ):
        """Load `num_samples` audio arrays from the dummy LibriSpeech split."""
        _snake_case : Dict = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        _snake_case : Any = ds.sort('id' ).select(range(lowerCamelCase_ ) )[:num_samples]['audio']

        return [x["array"] for x in speech_samples]

    def __UpperCAmelCase ( self : int ):
        """Integration: first 30 log-mel values match recorded reference constants."""
        _snake_case : Tuple = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        _snake_case : str = self._load_datasamples(1 )
        _snake_case : Any = WhisperFeatureExtractor()
        _snake_case : int = feature_extractor(lowerCamelCase_ , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 30_00) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )

    def __UpperCAmelCase ( self : str ):
        """zero_mean_unit_var_norm yields ~zero mean and ~unit variance."""
        _snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _snake_case : Any = self._load_datasamples(1 )[0]
        _snake_case : Dict = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35  # Rescale to [0, 65535] to show issue
        _snake_case : Dict = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase_ )[0]
        self.assertTrue(np.all(np.mean(lowerCamelCase_ ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ ) - 1 ) < 1e-3 ) )
| 652 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Model identifiers and tiny fixtures used by the seq2seq dataset tests below.
# FIX: all eight constants were bound to the same throwaway name while the
# code below references BERT_BASE_CASED, PEGASUS_XSUM, ARTICLES, SUMMARIES,
# T5_TINY, BART_TINY, MBART_TINY and MARIAN_TINY.
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : str = '\n'.join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase )
def make_test_data_dir(tmp_dir):
    """Populate *tmp_dir* with {train,val,test}.{source,target} files built from
    the module-level ARTICLES / SUMMARIES fixtures, and return it.

    FIX: the def was renamed `A__` with its single parameter reused as both the
    directory root and the file content, while the test methods below call
    `make_test_data_dir`; restored to write the fixture lists.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class lowercase ( a_ ):
"""simple docstring"""
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
        """SeqaSeqDataset truncates source/target to the configured max lengths
        and, for mBART, places language codes in the expected positions.

        NOTE(review): locals are machine-obfuscated -- every binding targets
        `_snake_case` while `tokenizer`, `max_src_len`, `max_tgt_len`,
        `src_lang`/`tgt_lang`, `train_dataset` and `dataloader` are read
        undefined. Code kept byte-for-byte; restore names before running.
        """
        _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Dict = 4
        _snake_case : Any = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        _snake_case : int = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
        _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
        """LegacySeqaSeqDataset trims sources to max_source_length=20 and
        truncates targets to the requested length.

        NOTE(review): locals are machine-obfuscated as in the method above;
        code kept byte-for-byte.
        """
        _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Union[str, Any] = 4
        _snake_case : Optional[int] = LegacySeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
        _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def __UpperCAmelCase ( self : Dict ):
        """pack_data_dir merges the two fixture examples into one packed example
        of up to 128 tokens, preserving total content and file names.

        NOTE(review): locals are machine-obfuscated as in the methods above
        (`tmp_dir`, `save_dir`, `packed_examples`, `orig_examples`,
        `orig_paths`, `new_paths` are read undefined); code kept byte-for-byte.
        """
        _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
        _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
        _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
        _snake_case : Dict = {x.name for x in save_dir.iterdir()}
        _snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1
        assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def __UpperCAmelCase ( self : List[str] ):
        """Dynamic batch sampler produces variable batch sizes, drops/adds no
        examples, respects the batch-size multiple, and keeps token counts
        near max_tokens.

        NOTE(review): locals are machine-obfuscated as in the methods above
        (`ds`, `batch_sampler`, `max_tokens`, `failures`, `num_src_per_batch`
        are read undefined); code kept byte-for-byte.
        """
        if not FAIRSEQ_AVAILABLE:
            return
        _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
        _snake_case : List[str] = 64
        _snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
        _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
        assert len(set(lowerCamelCase_ ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ )  # no dropped or added examples
        _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : List[Any] = []
        _snake_case : List[Any] = []
        for batch in data_loader:
            _snake_case : Any = batch['input_ids'].shape
            _snake_case : str = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _snake_case : int = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(lowerCamelCase_ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(lowerCamelCase_ )
        assert num_src_per_batch[0] == max(lowerCamelCase_ )
        if failures:
            raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Test that the sortish sampler reduces padding: fewer pad tokens per batch
        than an unsorted DataLoader over the same dataset.

        NOTE(review): the inner helper declares two parameters with the same
        name (``lowerCamelCase_``), which is a SyntaxError in Python, and the
        asserts compare ``lowerCamelCase_`` with itself -- mechanical-rename
        damage; verify against upstream.
        '''
        _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
        _snake_case : Optional[Any] = 2
        _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
        _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.pad_token_id
        def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
            return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
        assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
        assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
        '''Build and return ``(dataset, max_tokens, tokenizer)`` for the sampler tests,
        using real WMT data when the USE_REAL_DATA env var is set, else the tiny
        bundled test data.

        NOTE(review): the signature declares ``lowerCamelCase_`` twice (a
        SyntaxError), and the body reads unbound names (``max_len``, ``ds``,
        ``max_tokens``, ``tokenizer``) -- mechanical-rename damage; the upstream
        helper takes ``(n_obs, max_len)``. Verify before use.
        '''
        if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
            _snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
            _snake_case : List[Any] = max_len * 2 * 64
            if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
                save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        else:
            _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
            _snake_case : List[Any] = max_len * 4
            save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : str = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
        return ds, max_tokens, tokenizer
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Test that two DistributedSortishSampler replicas (rank 0 and rank 1) yield
        disjoint index sets.

        NOTE(review): ``idsa`` and the ``lowerCamelCase_`` arguments are unbound
        names here (rename damage); upstream compares the two rank-specific id
        sets. Verify before use.
        '''
        _snake_case , _snake_case , _snake_case : Any = self._get_dataset()
        _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
        _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
        assert idsa.intersection(lowerCamelCase_ ) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
        '''Test per-tokenizer dataset kwargs: mBART datasets must carry src/tgt language
        kwargs; BART must carry ``add_prefix_space``; others carry none.

        NOTE(review): ``tok_name`` and ``kwargs`` are read but never bound
        (assignments went to ``_snake_case``) -- mechanical-rename damage;
        verify against the upstream parameterized test.
        '''
        _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
        if tok_name == MBART_TINY:
            _snake_case : int = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            _snake_case : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            _snake_case : Tuple = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            _snake_case : List[Any] = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 1 |
def A__( __lowerCAmelCase ):
    """Encode a bytes-like object as an uppercase Base16 (hex) string, per RFC 4648.

    :param __lowerCAmelCase: the raw bytes to encode
    :return: uppercase hexadecimal string, two digits per input byte

    >>> A__(b'Hello World!')
    '48656C6C6F20576F726C6421'
    >>> A__(b'')
    ''
    """
    # Fix: hex() must be applied to each byte of the input, not to the whole
    # bytes object (hex(bytes) raises TypeError and the loop variable was unused).
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(__lowerCAmelCase )] )
def A__( __lowerCAmelCase ):
    """Decode an uppercase Base16 (hex) string back to bytes, per RFC 4648/3548.

    :param __lowerCAmelCase: a string of uppercase hex digits, even length
    :return: the decoded bytes
    :raises ValueError: if the length is odd or the string contains characters
        outside the uppercase Base16 alphabet ``0123456789ABCDEF``
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(__lowerCAmelCase ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(__lowerCAmelCase ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    # Fix: the comprehension previously indexed the undefined name ``data``.
    return bytes(int(__lowerCAmelCase[i] + __lowerCAmelCase[i + 1] , 16 ) for i in range(0 , len(__lowerCAmelCase ) , 2 ) )
# Run the doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 652 |
from __future__ import annotations
def A__( value , weight , capacity ):
    """Solve the fractional knapsack problem with the classic greedy algorithm.

    Items are taken in decreasing order of value density (value/weight); the
    first item that does not fit is taken fractionally and the loop stops.

    :param value: list of item values
    :param weight: list of item weights (parallel to ``value``)
    :param capacity: total knapsack capacity
    :return: tuple ``(max_value, fractions)`` where ``fractions[i]`` is the
        fraction (0..1) of item ``i`` placed in the knapsack

    Fixes: the original declared three parameters with the same name (a
    SyntaxError), sorted with a lambda that referenced an undefined loop
    variable, passed a parameter as ``reverse=`` instead of ``True``, and
    assigned results to throwaway ``_snake_case`` names.
    """
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # Highest value density first.
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the fraction that fills the remaining capacity, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
# Run the doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 652 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( a_ ):
    """Output container for the prior transformer: holds the predicted CLIP image embedding."""
    # Predicted image embedding produced by the transformer head.
    _UpperCamelCase : torch.FloatTensor
class lowercase ( a_ , a_ ):
    """A prior transformer that maps text embeddings to CLIP image embeddings
    (diffusers ``PriorTransformer``-style model: time embedding + projections +
    a stack of BasicTransformerBlocks with a causal attention mask).

    NOTE(review): this class is damaged by a mechanical rename -- ``__init__``
    declares ``lowerCamelCase_`` fifteen times (a SyntaxError), and throughout
    the body results are bound to ``_snake_case`` while later lines read the
    original upstream names (``timesteps``, ``proj_embeddings``,
    ``hidden_states`` after reassignment, ...). Verify every method against the
    upstream diffusers ``PriorTransformer`` before relying on it.
    """
    @register_to_config
    # NOTE(review): duplicate parameter names below -- SyntaxError as written.
    def __init__( self : int , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 64 , lowerCamelCase_ : int = 20 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : str=77 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : str = "silu" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = "linear" , lowerCamelCase_ : Optional[str] = "prd" , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , ):
        '''Build the time projection/embedding, input/output projections, the
        transformer block stack and the causal attention mask buffer.'''
        super().__init__()
        _snake_case : Dict = num_attention_heads
        _snake_case : Optional[Any] = attention_head_dim
        # Inner width of the transformer = heads * head_dim.
        _snake_case : Tuple = num_attention_heads * attention_head_dim
        _snake_case : Any = additional_embeddings
        # Fall back to the inner/embedding dims when the optional dims are None.
        _snake_case : Union[str, Any] = time_embed_dim or inner_dim
        _snake_case : Any = embedding_proj_dim or embedding_dim
        _snake_case : int = clip_embed_dim or embedding_dim
        _snake_case : Tuple = Timesteps(lowerCamelCase_ , lowerCamelCase_ , 0 )
        _snake_case : int = TimestepEmbedding(lowerCamelCase_ , lowerCamelCase_ , out_dim=lowerCamelCase_ , act_fn=lowerCamelCase_ )
        _snake_case : Any = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        # Optional LayerNorm in front of the embedding projection.
        if embedding_proj_norm_type is None:
            _snake_case : str = None
        elif embedding_proj_norm_type == "layer":
            _snake_case : List[Any] = nn.LayerNorm(lowerCamelCase_ )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        _snake_case : Any = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        # Optional projection for encoder hidden states.
        if encoder_hid_proj_type is None:
            _snake_case : List[Any] = None
        elif encoder_hid_proj_type == "linear":
            _snake_case : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        # Learned positional embedding covering the base + additional tokens.
        _snake_case : List[Any] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase_ ) )
        # Optional learned "prd" token appended to the sequence.
        if added_emb_type == "prd":
            _snake_case : Any = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase_ ) )
        elif added_emb_type is None:
            _snake_case : Optional[Any] = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        _snake_case : Any = nn.ModuleList(
            [
                BasicTransformerBlock(
                    lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dropout=lowerCamelCase_ , activation_fn='gelu' , attention_bias=lowerCamelCase_ , )
                for d in range(lowerCamelCase_ )
            ] )
        # Optional LayerNorm applied to the token sequence before the blocks.
        if norm_in_type == "layer":
            _snake_case : Any = nn.LayerNorm(lowerCamelCase_ )
        elif norm_in_type is None:
            _snake_case : List[str] = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        _snake_case : Optional[int] = nn.LayerNorm(lowerCamelCase_ )
        _snake_case : int = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
        # Upper-triangular -10000 mask => each token can only attend backwards.
        _snake_case : Optional[int] = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
        causal_attention_mask.triu_(1 )
        _snake_case : Dict = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' , lowerCamelCase_ , persistent=lowerCamelCase_ )
        # CLIP statistics used by the post-processing step below.
        _snake_case : Union[str, Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
        _snake_case : Tuple = nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __UpperCAmelCase ( self : List[str] ):
        '''Return a dict of all attention processors in the model, keyed by their module path.'''
        _snake_case : Any = {}
        def fn_recursive_add_processors(lowerCamelCase_ : str , lowerCamelCase_ : torch.nn.Module , lowerCamelCase_ : Dict[str, AttentionProcessor] ):
            if hasattr(lowerCamelCase_ , 'set_processor' ):
                _snake_case : Dict = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCamelCase_ , lowerCamelCase_ )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        return processors
    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        '''Recursively set the attention processor(s) on every attention layer;
        accepts a single processor or a dict keyed like ``attn_processors``.'''
        _snake_case : Dict = len(self.attn_processors.keys() )
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(lowerCamelCase_ )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(lowerCamelCase_ : str , lowerCamelCase_ : torch.nn.Module , lowerCamelCase_ : List[Any] ):
            if hasattr(lowerCamelCase_ , 'set_processor' ):
                if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                    module.set_processor(lowerCamelCase_ )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCamelCase_ , lowerCamelCase_ )
        for name, module in self.named_children():
            fn_recursive_attn_processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : Dict ):
        '''Reset every attention layer to the default AttnProcessor.'''
        self.set_attn_processor(AttnProcessor() )
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[torch.Tensor, float, int] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[torch.BoolTensor] = None , lowerCamelCase_ : bool = True , ):
        '''Forward pass: assemble [encoder states?, projected embedding, time
        embedding, hidden states, prd token?] into one sequence, add positional
        embeddings, run the causal transformer stack, and project the final
        token(s) to the predicted CLIP image embedding.'''
        _snake_case : Any = hidden_states.shape[0]
        _snake_case : Tuple = timestep
        # Normalise the timestep argument to a 1-D tensor on the right device.
        if not torch.is_tensor(lowerCamelCase_ ):
            _snake_case : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
            _snake_case : List[Any] = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        _snake_case : List[str] = timesteps * torch.ones(lowerCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
        _snake_case : Any = self.time_proj(lowerCamelCase_ )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        _snake_case : List[str] = timesteps_projected.to(dtype=self.dtype )
        _snake_case : int = self.time_embedding(lowerCamelCase_ )
        if self.embedding_proj_norm is not None:
            _snake_case : Dict = self.embedding_proj_norm(lowerCamelCase_ )
        _snake_case : str = self.embedding_proj(lowerCamelCase_ )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            _snake_case : int = self.encoder_hidden_states_proj(lowerCamelCase_ )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        _snake_case : List[str] = self.proj_in(lowerCamelCase_ )
        _snake_case : Union[str, Any] = self.positional_embedding.to(hidden_states.dtype )
        _snake_case : Optional[int] = []
        _snake_case : Tuple = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(lowerCamelCase_ )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # Ensure 3-D (batch, seq, dim) shapes before concatenation.
        if len(proj_embeddings.shape ) == 2:
            _snake_case : int = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            _snake_case : Optional[int] = hidden_states[:, None, :]
        _snake_case : Tuple = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            _snake_case : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase_ , -1 , -1 )
            additional_embeds.append(lowerCamelCase_ )
        _snake_case : str = torch.cat(
            lowerCamelCase_ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        _snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            _snake_case : Dict = F.pad(
                lowerCamelCase_ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        _snake_case : Union[str, Any] = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the boolean mask to additive -10000 form and merge it
            # with the causal mask, then repeat per attention head.
            _snake_case : List[str] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
            _snake_case : str = F.pad(lowerCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
            _snake_case : List[str] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            _snake_case : Dict = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            _snake_case : Optional[Any] = self.norm_in(lowerCamelCase_ )
        for block in self.transformer_blocks:
            _snake_case : int = block(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
        _snake_case : Dict = self.norm_out(lowerCamelCase_ )
        # With a prd token, only its final position carries the prediction;
        # otherwise read everything after the additional embeddings.
        if self.prd_embedding is not None:
            _snake_case : Optional[int] = hidden_states[:, -1]
        else:
            _snake_case : str = hidden_states[:, additional_embeddings_len:]
        _snake_case : List[str] = self.proj_to_clip_embeddings(lowerCamelCase_ )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase_ )
    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Optional[int] ):
        '''Un-normalise prior latents using the stored CLIP mean/std parameters.'''
        _snake_case : Any = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 652 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Map of submodule name -> public symbols, used to build the lazy module below.
# NOTE(review): upstream names this ``_import_structure`` (which L5485 reads);
# the rename to ``lowercase_`` leaves ``_import_structure`` undefined -- verify.
lowercase_ : Any = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}
# The modeling symbols are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[Any] = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]
# Under static type checking, import everything eagerly so annotations resolve;
# at runtime, install a lazy module instead.
if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys
    # NOTE(review): upstream assigns the _LazyModule to sys.modules[__name__];
    # binding it to a plain variable never installs the lazy module -- verify.
    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
import math
import unittest
def A__( __lowerCAmelCase ):
    """Return True iff the argument is a prime number, using 6k +/- 1 trial division.

    :param __lowerCAmelCase: the number to test; must be a non-negative int
    :raises AssertionError: if the argument is not an int or is negative

    Fix: the original asserted ``isinstance(x, x)`` (always a TypeError) and
    read the undefined name ``number``; all checks now use the actual argument.
    """
    assert isinstance(__lowerCAmelCase , int ) and (
        __lowerCAmelCase >= 0
    ), "'number' must been an int and positive"
    if 1 < __lowerCAmelCase < 4:
        # 2 and 3 are primes
        return True
    elif __lowerCAmelCase < 2 or __lowerCAmelCase % 2 == 0 or __lowerCAmelCase % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
        if __lowerCAmelCase % i == 0 or __lowerCAmelCase % (i + 2) == 0:
            return False
    return True
class lowercase ( unittest.TestCase ):
    """Unit tests for the ``A__`` primality checker (upstream name: ``is_prime``).

    Fixes: the test bodies previously called the undefined name ``is_prime``
    (the function in this module is named ``A__``), and ``assertRaises``
    received an undefined name -- the checker validates its input with
    ``assert``, so ``AssertionError`` is the exception it raises.

    NOTE(review): both methods share the name ``__UpperCAmelCase`` (the second
    definition shadows the first) and neither starts with ``test``, so default
    unittest discovery will not run them -- artifact of the mechanical rename.
    """

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Known small primes are recognised as prime.'''
        self.assertTrue(A__(2 ) )
        self.assertTrue(A__(3 ) )
        self.assertTrue(A__(5 ) )
        self.assertTrue(A__(7 ) )
        self.assertTrue(A__(11 ) )
        self.assertTrue(A__(13 ) )
        self.assertTrue(A__(17 ) )
        self.assertTrue(A__(19 ) )
        self.assertTrue(A__(23 ) )
        self.assertTrue(A__(29 ) )

    def __UpperCAmelCase ( self : int ):
        '''Invalid inputs raise; composites and 0/1 are rejected.'''
        with self.assertRaises(AssertionError ):
            A__(-19 )
        self.assertFalse(
            A__(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
        self.assertFalse(
            A__(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
        self.assertFalse(A__(2 * 2 ) )
        self.assertFalse(A__(2 * 3 ) )
        self.assertFalse(A__(3 * 3 ) )
        self.assertFalse(A__(3 * 5 ) )
        self.assertFalse(A__(3 * 5 * 7 ) )
# Execute the unit tests when this module is run directly.
if __name__ == "__main__":
    unittest.main()
| 652 |
import math
def A__( __lowerCAmelCase ):
    """Return True iff the argument is prime, using 6k +/- 1 trial division.

    Fix: the body previously mixed reads of the undefined name ``number`` with
    the actual parameter; all reads now use the argument.
    """
    if 1 < __lowerCAmelCase < 4:
        # 2 and 3 are primes
        return True
    elif __lowerCAmelCase < 2 or __lowerCAmelCase % 2 == 0 or __lowerCAmelCase % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
        if __lowerCAmelCase % i == 0 or __lowerCAmelCase % (i + 2) == 0:
            return False
    return True
def _is_prime( number ):
    """Module-private 6k +/- 1 trial-division primality test used by the solver.

    Added because the public primality helper in this module was renamed to
    ``A__`` and is shadowed by the solver definition below, so the upstream
    call to ``is_prime`` would raise NameError.
    """
    if 1 < number < 4:
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A__( __lowerCAmelCase = 1_00_01 ):
    """Return the nth prime number (Project Euler problem 7; default: 10001st).

    :param __lowerCAmelCase: which prime to return (1-based); anything castable to int
    :raises TypeError: if the argument cannot be cast to int
    :raises ValueError: if the argument is less than one

    Fixes: the original bound its intermediates to throwaway ``_snake_case``
    names and then read the unbound names ``nth``/``num``/``primes``, and the
    loop condition measured ``len()`` of the argument instead of the list of
    primes collected so far.
    """
    try:
        nth = int(__lowerCAmelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes : list[int] = []
    num = 2
    # Collect primes until we have the nth one.
    while len(primes ) < nth:
        if _is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
# Print the Project Euler #7 answer when run as a script.
# NOTE(review): ``solution`` is not defined in this module (the function was
# renamed to ``A__``) -- this guard raises NameError as written.
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 652 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Module-level flag (set to False); the original ``str`` annotation did not
# match the bool value, so it is corrected here.
lowercase_ : bool = False
@skip_mps
class lowercase ( a_ , a_ , a_ , unittest.TestCase ):
    """Fast (tiny-model) pipeline tests for StableDiffusionAttendAndExcitePipeline.

    NOTE(review): throughout this class, values computed into ``_snake_case``
    are later read via their upstream names (``unet``, ``scheduler``, ``vae``,
    ``pipe``, ``image``, ...), and several call arguments are the undefined
    name ``lowerCamelCase_`` -- mechanical-rename damage; verify against the
    upstream diffusers test file before relying on these tests.
    """
    # Pipeline class under test plus the shared parameter sets used by the mixins.
    _UpperCamelCase : int = StableDiffusionAttendAndExcitePipeline
    _UpperCamelCase : List[Any] = False
    _UpperCamelCase : Any = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
    _UpperCamelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def __UpperCAmelCase ( cls : Dict ):
        '''Class-level setup: toggle deterministic torch algorithms for reproducibility.'''
        super().setUpClass()
        torch.use_deterministic_algorithms(lowerCamelCase_ )
    @classmethod
    def __UpperCAmelCase ( cls : int ):
        '''Class-level teardown: restore the deterministic-algorithms setting.'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Any ):
        '''Build tiny seeded UNet/scheduler/VAE/CLIP components for a dummy pipeline.'''
        torch.manual_seed(0 )
        _snake_case : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
        _snake_case : Optional[int] = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        torch.manual_seed(0 )
        _snake_case : int = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        _snake_case : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
        _snake_case : Optional[int] = CLIPTextModel(lowerCamelCase_ )
        _snake_case : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        _snake_case : Optional[Any] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=0 ):
        '''Build the deterministic dummy inputs dict (prompt, token indices, generator).

        NOTE(review): duplicate ``lowerCamelCase_`` parameters here -- a
        SyntaxError as written.
        '''
        if str(lowerCamelCase_ ).startswith('mps' ):
            _snake_case : str = torch.manual_seed(lowerCamelCase_ )
        else:
            _snake_case : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        _snake_case : Any = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Run the tiny pipeline on CPU and compare an image slice to reference values.'''
        _snake_case : Any = 'cpu'
        _snake_case : List[Any] = self.get_dummy_components()
        _snake_case : List[Any] = self.pipeline_class(**lowerCamelCase_ )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[str] = self.get_dummy_inputs(lowerCamelCase_ )
        _snake_case : Tuple = pipe(**lowerCamelCase_ ).images
        _snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        _snake_case : Optional[int] = np.array(
            [0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
        _snake_case : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(lowerCamelCase_ , 1e-3 )
    def __UpperCAmelCase ( self : int ):
        '''Delegate to the mixin's CPU-offload forward-pass test with a looser tolerance.'''
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def __UpperCAmelCase ( self : Tuple ):
        '''Check batch-size consistency for batches of 1 and 2.'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def __UpperCAmelCase ( self : List[str] ):
        '''Check batched inference matches single inference within tolerance.'''
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
    def __UpperCAmelCase ( self : Tuple ):
        '''Check dict and tuple outputs are equivalent within tolerance.'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def __UpperCAmelCase ( self : Dict ):
        '''Check pt/np/pil output formats agree within tolerance.'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def __UpperCAmelCase ( self : int ):
        '''Check save/load round-trip of the full pipeline within tolerance.'''
        super().test_save_load_local(expected_max_difference=5e-4 )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Check save/load round-trip with optional components removed.'''
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase ):
    """Slow, GPU-only integration test for StableDiffusionAttendAndExcitePipeline
    against a real SD v1.4 checkpoint and a reference image.

    NOTE(review): several call arguments are the undefined name
    ``lowerCamelCase_`` and computed values are read via their upstream names
    (``pipe``, ``image``, ``expected_image``) -- mechanical-rename damage;
    verify against the upstream diffusers test file.
    """
    @classmethod
    def __UpperCAmelCase ( cls : Optional[Any] ):
        '''Class-level setup: toggle deterministic torch algorithms.'''
        super().setUpClass()
        torch.use_deterministic_algorithms(lowerCamelCase_ )
    @classmethod
    def __UpperCAmelCase ( cls : Union[str, Any] ):
        '''Class-level teardown: restore the deterministic-algorithms setting.'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Per-test teardown: free Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : str ):
        '''Generate one image with attend-and-excite and compare it to the hosted
        reference numpy image within a coarse tolerance.'''
        _snake_case : Dict = torch.manual_seed(51 )
        _snake_case : Any = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa )
        pipe.to('cuda' )
        _snake_case : Union[str, Any] = 'a painting of an elephant with glasses'
        _snake_case : Union[str, Any] = [5, 7]
        _snake_case : int = pipe(
            prompt=lowerCamelCase_ , token_indices=lowerCamelCase_ , guidance_scale=7.5 , generator=lowerCamelCase_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
        _snake_case : int = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 652 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
    """FSNER few-shot NER model: a BERT encoder plus cosine-similarity scoring of
    query tokens against start/end tokens of support examples.

    NOTE(review): this class is damaged by a mechanical rename -- several
    methods declare duplicate ``lowerCamelCase_`` parameters (a SyntaxError)
    and bodies read unbound names (``token_embeddings``, ``T``,
    ``W_supports``, ``S``, ``q``, ...). Verify against the upstream
    sayef/fsner implementation before use.
    """
    def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
        '''Load the pretrained BERT backbone and set up cosine-similarity/softmax layers.'''
        super(lowerCamelCase_ , self ).__init__()
        _snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
        _snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
        _snake_case : str = torch.nn.Softmax(dim=1 )
    def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
        '''Encode tokenized inputs with BERT and return the last hidden state.'''
        return self.bert(**lowerCamelCase_ ).last_hidden_state
    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
        '''Sum token embeddings along dim 2 (keeping the dim).

        NOTE(review): ``token_embeddings`` is unbound here -- rename damage.
        '''
        return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
        '''Softmax over temperature-scaled cosine similarity.

        NOTE(review): duplicate parameters above (SyntaxError) and ``T`` is
        unbound -- rename damage.
        '''
        return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
        '''Score each query against its support set, returning per-token start and
        end probability stacks ``(p_starts, p_ends)``.'''
        _snake_case : int = W_supports['sizes'].tolist()
        _snake_case : int = W_supports['start_token_id'].item()
        _snake_case : List[str] = W_supports['end_token_id'].item()
        # Remove bookkeeping keys before feeding the dict to BERT.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        _snake_case : Optional[int] = self.BERT(**lowerCamelCase_ )
        _snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ )
        _snake_case : Optional[int] = None
        _snake_case : Optional[int] = None
        _snake_case : List[str] = W_supports['input_ids'] == start_token_id
        _snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id
        # Walk the flattened support batch, slicing out each query's supports.
        for i, size in enumerate(lowerCamelCase_ ):
            if i == 0:
                _snake_case : str = 0
            else:
                _snake_case : Union[str, Any] = support_sizes[i - 1]
            _snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]]
            _snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
            _snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            _snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                _snake_case : Optional[Any] = torch.vstack((p_starts, p_start) )
                _snake_case : List[str] = torch.vstack((p_ends, p_end) )
            else:
                _snake_case : Union[str, Any] = p_start
                _snake_case : Any = p_end
        return p_starts, p_ends
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase_ : List[Any] = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
lowercase_ : Union[str, Any] = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowercase ( a_ ):
    """Configuration for an NLLB-MoE (mixture-of-experts seq2seq) model.

    NOTE(review): obfuscation damage — the three class attributes below all
    collide on the name `_UpperCamelCase` (originally `model_type`,
    `keys_to_ignore_at_inference`, `attribute_map`), and `__init__` declares
    `lowerCamelCase_` dozens of times (a SyntaxError as written) while its
    body reads the original parameter names (`vocab_size`, `d_model`, ...).
    Confirm against the upstream config before use.
    """
    _UpperCamelCase : List[str] = "nllb-moe"
    _UpperCamelCase : Union[str, Any] = ["past_key_values"]
    _UpperCamelCase : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self : str , lowerCamelCase_ : List[str]=12_81_12 , lowerCamelCase_ : Any=10_24 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=40_96 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Tuple=40_96 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[str]=0.05 , lowerCamelCase_ : Tuple=0.05 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Dict="relu" , lowerCamelCase_ : Optional[Any]=10_24 , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : List[Any]=0.02 , lowerCamelCase_ : int=2 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Dict="float32" , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : Union[str, Any]=1_28 , lowerCamelCase_ : Optional[Any]=64 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : int=4 , lowerCamelCase_ : Dict=0.001 , lowerCamelCase_ : Any=0.001 , lowerCamelCase_ : str="all" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Dict=1.0 , lowerCamelCase_ : Optional[int]=0.2 , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : int=False , **lowerCamelCase_ : Any , ):
        '''Store model hyperparameters (encoder/decoder sizes, dropout rates,
        MoE routing options) and forward special-token ids to the base class.'''
        # Transformer backbone dimensions.
        _snake_case : List[str] = vocab_size
        _snake_case : int = max_position_embeddings
        _snake_case : List[Any] = d_model
        _snake_case : Any = encoder_ffn_dim
        _snake_case : List[Any] = encoder_layers
        _snake_case : Any = encoder_attention_heads
        _snake_case : List[Any] = decoder_ffn_dim
        _snake_case : List[Any] = decoder_layers
        _snake_case : List[str] = decoder_attention_heads
        # Regularisation / activation settings.
        _snake_case : List[Any] = dropout
        _snake_case : Union[str, Any] = attention_dropout
        _snake_case : Optional[Any] = activation_dropout
        _snake_case : int = activation_function
        _snake_case : List[Any] = init_std
        _snake_case : Union[str, Any] = encoder_layerdrop
        _snake_case : Tuple = decoder_layerdrop
        _snake_case : Optional[Any] = use_cache
        _snake_case : List[Any] = encoder_layers
        _snake_case : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyperparameters.
        _snake_case : Any = router_z_loss_coef
        _snake_case : Tuple = router_aux_loss_coef
        _snake_case : List[Any] = decoder_sparse_step
        _snake_case : Any = encoder_sparse_step
        _snake_case : Any = num_experts
        _snake_case : str = expert_capacity
        _snake_case : Any = router_bias
        # Router computations only support these dtypes.
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        _snake_case : Union[str, Any] = router_dtype
        _snake_case : int = router_ignore_padding_tokens
        _snake_case : Any = batch_prioritized_routing
        _snake_case : List[str] = second_expert_policy
        _snake_case : Optional[int] = normalize_router_prob_before_dropping
        _snake_case : int = moe_eval_capacity_token_fraction
        _snake_case : Tuple = moe_token_dropout
        _snake_case : Dict = output_router_logits
        super().__init__(
            pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 652 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( a_ , a_ , a_ ):
    """Token encoder built from a stack of T5 blocks.

    NOTE(review): obfuscation damage — both methods declare `lowerCamelCase_`
    multiple times (a SyntaxError as written), all attribute assignments were
    renamed to `_snake_case`, and the forward pass reads names
    (`encoder_input_tokens`, `x`, `encoder_inputs_mask`) that are never bound
    here. Confirm against the upstream module before use.
    """
    @register_to_config
    def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
        '''Build token/position embeddings, dropout layers and a ModuleList of
        `num_layers` TaBlock encoder layers plus a final TaLayerNorm.'''
        super().__init__()
        # Token and (learned) position embedding tables.
        _snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = False
        _snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
        # Encoder-only T5 configuration shared by every block.
        _snake_case : Union[str, Any] = TaConfig(
            vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = nn.ModuleList()
        for lyr_num in range(lowerCamelCase_ ):
            _snake_case : Any = TaBlock(lowerCamelCase_ )
            self.encoders.append(lowerCamelCase_ )
        _snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
        _snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
        '''Encode input tokens: embed + add positions, run all T5 blocks under an
        extended attention mask, layer-norm, and return (hidden_states, mask).'''
        _snake_case : Any = self.token_embedder(lowerCamelCase_ )
        _snake_case : List[Any] = encoder_input_tokens.shape[1]
        # Position ids 0..seq_len-1 on the same device as the inputs.
        _snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
        x += self.position_encoding(lowerCamelCase_ )
        _snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )
        # inverted the attention mask
        _snake_case : Dict = encoder_input_tokens.size()
        _snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
        for lyr in self.encoders:
            # Each TaBlock returns a tuple; position 0 is the hidden states.
            _snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
        _snake_case : Any = self.layer_norm(lowerCamelCase_ )
        return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
| 652 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( a_ , unittest.TestCase ):
    """Unit tests for the ProphetNet tokenizer and its Basic/Wordpiece helpers.

    NOTE(review): obfuscation damage throughout — many locals were renamed to
    `_snake_case` while later lines still reference the original names
    (`self.vocab_file`, `lowerCamelCase_` in zero-argument methods, `text`,
    `text_a`, ...). The assertions document intent but several methods cannot
    run as written; confirm against the upstream test file.
    """
    _UpperCamelCase : Optional[int] = ProphetNetTokenizer
    _UpperCamelCase : Any = False
    def __UpperCAmelCase ( self : int ):
        '''Write a small wordpiece vocabulary to a temp dir for the tests.'''
        super().setUp()
        _snake_case : Any = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        _snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] ):
        '''Return a (raw, expected-detokenized) text pair for round-trip checks.'''
        _snake_case : Any = 'UNwant\u00E9d,running'
        _snake_case : List[str] = 'unwanted, running'
        return input_text, output_text
    def __UpperCAmelCase ( self : Any ):
        '''Full tokenizer: tokenization and token→id conversion.'''
        _snake_case : Optional[int] = self.tokenizer_class(self.vocab_file )
        _snake_case : str = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(lowerCamelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [9, 6, 7, 12, 10, 11] )
    def __UpperCAmelCase ( self : Dict ):
        '''BasicTokenizer splits CJK characters into individual tokens.'''
        _snake_case : Tuple = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def __UpperCAmelCase ( self : int ):
        '''Lower-casing BasicTokenizer keeps accents by default.'''
        _snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def __UpperCAmelCase ( self : Any ):
        '''Lower-casing with strip_accents disabled keeps diacritics.'''
        _snake_case : int = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def __UpperCAmelCase ( self : List[Any] ):
        '''Lower-casing with strip_accents enabled removes diacritics.'''
        _snake_case : int = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def __UpperCAmelCase ( self : List[str] ):
        '''Default lower-casing behaviour strips accents.'''
        _snake_case : Optional[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def __UpperCAmelCase ( self : Any ):
        '''Case-preserving BasicTokenizer keeps original casing.'''
        _snake_case : Optional[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def __UpperCAmelCase ( self : List[Any] ):
        '''Case-preserving tokenizer with accents kept.'''
        _snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def __UpperCAmelCase ( self : Tuple ):
        '''Case-preserving tokenizer with accents stripped.'''
        _snake_case : List[str] = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''`never_split` tokens are kept intact by the BasicTokenizer.'''
        _snake_case : List[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def __UpperCAmelCase ( self : List[Any] ):
        '''WordpieceTokenizer: greedy longest-match with [UNK] fallback.'''
        _snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        _snake_case : Optional[int] = {}
        for i, token in enumerate(lowerCamelCase_ ):
            _snake_case : Any = i
        _snake_case : Dict = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
    @require_torch
    def __UpperCAmelCase ( self : List[str] ):
        '''Batch encoding of the pretrained tokenizer returns padded pt tensors.'''
        _snake_case : List[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        _snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        _snake_case : List[Any] = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        _snake_case : int = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='pt' )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : int = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Character classifier: whitespace detection.'''
        self.assertTrue(_is_whitespace(' ' ) )
        self.assertTrue(_is_whitespace('\t' ) )
        self.assertTrue(_is_whitespace('\r' ) )
        self.assertTrue(_is_whitespace('\n' ) )
        self.assertTrue(_is_whitespace('\u00A0' ) )
        self.assertFalse(_is_whitespace('A' ) )
        self.assertFalse(_is_whitespace('-' ) )
    def __UpperCAmelCase ( self : Dict ):
        '''Character classifier: control-character detection.'''
        self.assertTrue(_is_control('\u0005' ) )
        self.assertFalse(_is_control('A' ) )
        self.assertFalse(_is_control(' ' ) )
        self.assertFalse(_is_control('\t' ) )
        self.assertFalse(_is_control('\r' ) )
    def __UpperCAmelCase ( self : List[Any] ):
        '''Character classifier: punctuation detection.'''
        self.assertTrue(_is_punctuation('-' ) )
        self.assertTrue(_is_punctuation('$' ) )
        self.assertTrue(_is_punctuation('`' ) )
        self.assertTrue(_is_punctuation('.' ) )
        self.assertFalse(_is_punctuation('A' ) )
        self.assertFalse(_is_punctuation(' ' ) )
    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''build_inputs_with_special_tokens appends [SEP] (id 102) correctly.'''
        _snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        _snake_case : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase_ )
        _snake_case : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase_ )
        _snake_case : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
        _snake_case : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
        assert encoded_sentence == text + [1_02]
        assert encoded_pair == text + [1_02] + text_a + [1_02]
| 652 |
def A__( __lowerCAmelCase ):
    """Convert an Excel-style column title to its 1-based column number.

    >>> A__("A")
    1
    >>> A__("AB")
    28
    >>> A__("ZZ")
    702

    Raises:
        ValueError: if the title is empty or contains anything but A-Z.
    """
    # Validate explicitly instead of `assert`: asserts are stripped under -O,
    # and `str.isupper()` wrongly accepts digits (e.g. "AB1").
    if not __lowerCAmelCase or not all("A" <= ch <= "Z" for ch in __lowerCAmelCase ):
        raise ValueError("column title must be a non-empty string of letters A-Z" )
    answer = 0
    for ch in __lowerCAmelCase:
        # Base-26 accumulation: 'A' (ord 65) maps to 1, ..., 'Z' to 26.
        answer = answer * 26 + (ord(ch ) - 64)
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 652 | 1 |
def A__( __lowerCAmelCase ):
    """Return the first n Hamming numbers, i.e. values of the form 2^i * 3^j * 5^k.

    The argument is coerced with `int()`; a count below 1 raises ValueError.
    Fixes obfuscation damage in the original: the constructed ValueError was
    raised via an unbound name (`raise my_error`) and the working variables
    (`hamming_list`, `i`, `j`, `k`, `index`) were never actually bound.
    """
    n_element = int(__lowerCAmelCase )
    if n_element < 1:
        raise ValueError('a should be a positive number' )
    hamming_list = [1]
    # i, j, k index the smallest element whose multiple by 2/3/5 exceeds
    # the current maximum; the next Hamming number is the least candidate.
    i = j = k = 0
    while len(hamming_list ) < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
    return hamming_list
if __name__ == "__main__":
lowercase_ : Optional[Any] = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
lowercase_ : Optional[int] = hamming(int(n))
print('''-----------------------------------------------------''')
print(F'''The list with nth numbers is: {hamming_numbers}''')
print('''-----------------------------------------------------''')
| 652 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """Translation Edit Rate (TER) metric backed by sacrebleu's TER scorer.

    NOTE(review): obfuscation damage — the compute method declares
    `lowerCamelCase_` six times (a SyntaxError as written) and its body reads
    `references`, `references_per_prediction`, `sb_ter`, `output`, which are
    never bound under those names here. Confirm against the upstream metric.
    """
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Describe the metric (features, citation, urls); requires sacrebleu>=1.4.12.'''
        # TER support was added to sacrebleu in 1.4.12; fail early otherwise.
        if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
                } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
                'https://github.com/jhclark/tercom',
            ] , )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
        '''Compute corpus TER; requires the same number of references per prediction.

        Returns a dict with `score` (num_edits / ref_length * 100), the
        cumulative `num_edits` and the cumulative `ref_length`.
        '''
        _snake_case : str = len(references[0] )
        if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        # Transpose: per-prediction reference lists -> per-reference streams,
        # the layout sacrebleu's corpus_score expects.
        _snake_case : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )]
        _snake_case : Optional[int] = TER(
            normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , )
        _snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node: `data` is the key, `left`/`right` may be None.

    Reconstructed from obfuscation damage: the original declared the same
    field name three times, while all code references `data`/`left`/`right`
    and the type name `TreeNode`.
    """

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def A__( __lowerCAmelCase ):
    """Return True iff the tree rooted at the argument is a binary search tree.

    Every reachable node must be a TreeNode whose `data` converts to float;
    otherwise ValueError is raised.  An empty tree (None) is valid.
    """

    # Validation
    def is_valid_tree(node ) -> bool:
        # Structural check: every reachable node is a TreeNode with numeric data.
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(__lowerCAmelCase ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )

    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        # BST invariant: each key lies strictly between the bounds inherited
        # from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )

    return is_binary_search_tree_recursive_check(__lowerCAmelCase , -float('inf' ) , float('inf' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def A__( qs , ks ):
    """Return True if the regex tuple `qs` matches any contiguous window of `ks`.

    Each pattern is end-anchored with '$' and matched with `re.Pattern.match`,
    so a pattern must cover its key entirely.  An empty `qs` never matches.
    Reconstructed from obfuscation damage (the original declared the same
    parameter name twice and read unbound names).
    """
    # Compile once per call; '$' forces a full-key match since match() anchors
    # at the start.
    qts = tuple(re.compile(x + '$' ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def A__( __lowerCAmelCase ):
    """Return a `replace(key, val)` closure applying (pattern, replacement) rules.

    The first rule whose pattern tuple matches a window of `key` supplies the
    replacement; otherwise `val` is returned unchanged.
    NOTE(review): the closure calls `_match`, which this obfuscated file only
    defines under the name `A__`; confirm the binding against upstream.
    """

    def replace(key , val ):
        # First matching rule wins; rules is the (pattern, replacement) list.
        for rule, replacement in __lowerCAmelCase:
            if _match(rule , key ):
                return replacement
        return val

    return replace
def A__( ):
    """Static partition rules mapping GPT parameter paths to PartitionSpecs.

    'mp' marks the model-parallel mesh axis; None leaves a dimension
    replicated.  Reconstructed from obfuscation damage: the original zero-arg
    function referenced an unbound name where each None now appears —
    confirm against the upstream Flax partitioning table.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , None )),
        (("transformer", "wte", "embedding"), P('mp' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def A__( __lowerCAmelCase ):
    """Map every leaf of a nested param dict to a PartitionSpec via the rule table.

    Leaves that no rule matches keep the `_unmatched` sentinel, which trips the
    completeness assertion.  Fixes obfuscation damage: the dict comprehension
    called `replace` with the function's own parameter twice instead of
    `replace(k, v)`.
    NOTE(review): relies on `_get_partition_rules`, `_replacement_rules` and
    the `_unmatched` sentinel, which this obfuscated file defines under other
    names; confirm the bindings against upstream.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    # Seed every flattened key with the sentinel, then apply the rules.
    initd = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
| 652 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase ( a_ ):
    """PipelineTool answering a natural-language question about an image using
    the ViLT VQA checkpoint `dandelin/vilt-b32-finetuned-vqa`.

    NOTE(review): obfuscation damage — several class attributes collide on
    `_UpperCamelCase` (originally distinct names such as
    `default_checkpoint`, `description`, `name`, `inputs`, `outputs`), and
    `encode` declares `lowerCamelCase_` twice (a SyntaxError as written).
    """
    _UpperCamelCase : Optional[Any] = "dandelin/vilt-b32-finetuned-vqa"
    _UpperCamelCase : int = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    _UpperCamelCase : int = "image_qa"
    _UpperCamelCase : Dict = AutoProcessor
    _UpperCamelCase : Tuple = AutoModelForVisualQuestionAnswering
    _UpperCamelCase : str = ["image", "text"]
    _UpperCamelCase : List[Any] = ["text"]
    def __init__( self : Tuple , *lowerCamelCase_ : Any , **lowerCamelCase_ : Union[str, Any] ):
        '''Require the vision extra before delegating to PipelineTool.__init__.'''
        requires_backends(self , ['vision'] )
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : "Image" , lowerCamelCase_ : str ):
        '''Preprocess (image, question) into model-ready pt tensors.'''
        return self.pre_processor(lowerCamelCase_ , lowerCamelCase_ , return_tensors='pt' )
    def __UpperCAmelCase ( self : str , lowerCamelCase_ : int ):
        '''Run the VQA model without gradients and return its logits.'''
        with torch.no_grad():
            return self.model(**lowerCamelCase_ ).logits
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
        '''Decode: take the argmax class and map it to its answer label.'''
        _snake_case : Dict = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
| 652 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( model , dirpath ):
    """Persist `model` into `dirpath` via `save_pretrained`, first removing any
    stale `config.json` / `pytorch_model.bin` so a clean copy is written.

    Reconstructed from obfuscation damage: the original declared the same
    parameter name twice while its body used `model` for the first argument.
    """
    # save results
    if os.path.exists(dirpath ):
        # Directory already exists: clear previous artefacts (files only).
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def A__( p , unlogit=False ):
    """Shannon entropy of `p` along the last dimension.

    When `unlogit` is True, `p` is squared first (treating the input as
    un-normalised attention values).  Reconstructed from obfuscation damage:
    the original declared the same parameter twice and degraded the
    `plogp[p == 0] = 0` mask into a dead assignment, so zero probabilities
    produced NaN (0 * log(0)).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    # 0 * log(0) is NaN in IEEE arithmetic; entropy defines it as 0.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def A__( tensor ):
    """Log a 2-D (layers x heads) tensor as a tab-separated table via `logger`.

    Integer (torch.long) tensors use `%d` formatting, everything else fixed
    5-decimal floats.  Reconstructed from obfuscation damage: the original
    body read the unbound name `tensor` while the parameter was renamed.
    """
    # Header row: 1-based head indices.
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
# NOTE(review): obfuscation damage — the signature declares `__lowerCAmelCase`
# seven times (a SyntaxError as written) and the body mixes those with the
# presumed original names (`args`, `model`, `head_mask`, `loss`, `outputs`,
# `attn_entropy`, `head_importance`, `tot_tokens`, ...). The comments below
# describe the apparent intent; confirm against the upstream script.
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
    '''Run the dataloader through the model to accumulate per-head attention
    entropy and gradient-based head-importance scores; returns
    (attn_entropy, head_importance, total_loss).'''
    # Model geometry: (num layers, num attention heads).
    _snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
    # Accumulators, one cell per (layer, head).
    _snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
    _snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
    if head_mask is None:
        # Default mask keeps every head; gradients on it give importance scores.
        _snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
        head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        _snake_case : List[str] = None
    _snake_case : str = 0.0
    _snake_case : List[str] = 0.0
    for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        _snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
        ((_snake_case) , ) : int = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        _snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        _snake_case , _snake_case , _snake_case : Dict = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            # Per-layer attention entropy, summed over batch/head/position dims.
            for layer, attn in enumerate(__lowerCAmelCase ):
                _snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            # Importance = accumulated |d loss / d head_mask|.
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        # L2-normalise scores within each layer (exponent 2).
        _snake_case : List[str] = 2
        _snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        # Min-max rescale to [0, 1] across all heads.
        _snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(__lowerCAmelCase )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(__lowerCAmelCase )
    logger.info('Head ranked by importance scores' )
    _snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    _snake_case : Optional[Any] = torch.arange(
        head_importance.numel() , device=args.device )
    _snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
    print_ad_tensor(__lowerCAmelCase )
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out ("mask") the least important attention heads.

    Heads are masked ``args.masking_amount`` (as a fraction of all heads) at a
    time, in ascending order of importance score, until the LM score
    (1 / loss) drops below ``args.masking_threshold * original_score`` —
    following Michel et al., "Are Sixteen Heads Really Better than One?"
    (http://arxiv.org/abs/1905.10650).

    Args:
        args: parsed CLI namespace (reads masking_threshold, masking_amount,
            output_dir).
        model: language model under analysis.
        eval_dataloader: dataloader used to (re)compute head importance.

    Returns:
        The final binary head mask tensor (1 = keep, 0 = masked); also saved
        to ``<output_dir>/head_mask.npy``.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # Sort heads from least to most important; heads that are already
        # masked get an infinite score so they are never selected again.
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break

        # Mask the `num_to_mask` least important heads.
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Re-compute the score and head importance under the updated mask.
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 1_00,
        )

    logger.info('Final head mask')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically remove the heads zeroed in ``head_mask`` and compare cost.

    Runs the model once with the mask (weights still present) and once after
    ``model.prune_heads`` (weights removed), logging the score, parameter
    count and timing of each, as in Michel et al.
    (http://arxiv.org/abs/1905.10650). The pruned model is saved via
    ``save_model``.

    Args:
        args: parsed CLI namespace (reads output_dir).
        model: language model; mutated in place by ``prune_heads``.
        eval_dataloader: dataloader used to compute the LM loss.
        head_mask: binary per-layer/per-head mask (1 = keep, 0 = prune).
    """
    # Score and timing with masking only (weights still in the model).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Map each layer index to the list of head indices whose mask entry is 0.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # A single pruned head comes back as a bare int - normalize to a list.
            heads_to_prune[k] = [v]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Score and timing after real pruning (no mask needed any more).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 1_00,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 1_00)
    save_model(model, args.output_dir)
def main():
    """CLI entry point.

    Loads a GPT-2 language model, computes per-head attention entropy and
    importance scores on the dataset given by ``--data_dir``, and — when
    ``--try_masking`` is set with a threshold in (0, 1) — masks then prunes
    the least important heads.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir',
        default=None,
        type=str,
        required=True,
        help='The input data dir. Should contain the .tsv files (or other data files) for the task.',
    )
    parser.add_argument(
        '--model_name_or_path',
        default=None,
        type=str,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    # Other parameters
    parser.add_argument(
        '--config_name',
        default='',
        type=str,
        help='Pretrained config name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--tokenizer_name',
        default='',
        type=str,
        help='Pretrained tokenizer name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--cache_dir',
        default=None,
        type=str,
        help='Where do you want to store the pre-trained models downloaded from s3',
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.'
    )
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory'
    )
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
    )
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers'
    )
    parser.add_argument(
        '--dont_normalize_global_importance',
        action='store_true',
        help='Don\'t normalize all importance scores between 0 and 1',
    )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.'
    )
    parser.add_argument(
        '--masking_threshold',
        default=0.9,
        type=float,
        help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).',
    )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount to heads to masking at each masking step.'
    )
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length',
        default=1_28,
        type=int,
        help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ),
    )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    # NOTE(review): class name matches this file's transformers import.
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset: one column of token ids loaded as int64.
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
# Script entry point: run the head importance / masking / pruning study.
if __name__ == "__main__":
    main()
| 652 | 1 |
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1E-12, max_iterations=1_00):
    """Estimate the largest-magnitude eigenvalue of a matrix by power iteration.

    Args:
        input_matrix: square numpy array; must be Hermitian when complex
            (symmetric real matrices are the real special case).
        vector: starting vector with a nonzero component along the dominant
            eigenvector; same length as the matrix dimension.
        error_tol: stop when the relative change of the eigenvalue estimate
            falls below this tolerance.
        max_iterations: hard cap on the number of iterations.

    Returns:
        Tuple ``(eigenvalue, eigenvector)`` — the dominant eigenvalue (real
        when the input is complex Hermitian) and the normalized eigenvector.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Convergence becomes True when we exceed max_iterations or when the
    # eigenvalue estimate changes very little between iterations.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (faster than usual b/c the vector is normalized).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence via the relative change of the estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        # A Hermitian matrix has real eigenvalues; drop the numerical imag part.
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration():
    """Check power_iteration against numpy.linalg.eigh.

    Exercises both a real symmetric and a complex Hermitian 3x3 matrix; the
    dominant eigenvalue and (sign-ambiguous) eigenvector must match numpy.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1J * complex_input_matrix, 1)
    # Adding the strictly-upper imaginary part and subtracting its transpose
    # makes the matrix Hermitian.
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation: eigh is for symmetric or Hermitian matrices.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Eigenvectors are only unique up to a sign - compare absolute values.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1E-6
# Run the doctests and the self-test when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_power_iteration()
| 652 |
def A__(num):
    """Return the largest integer obtainable by deleting exactly one digit.

    The sign of ``num`` is ignored (its absolute value is used). ``num`` must
    have at least two digits: for a single-digit input, removing the only
    digit leaves an empty string and ``int('')`` raises ValueError.

    >>> A__(152)
    52
    >>> A__(-152)
    52

    Raises:
        TypeError: if ``num`` is not an int.
    """
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    digits = str(abs(num))
    # One candidate per position, each with that position's digit removed.
    candidates = [list(digits) for _ in range(len(digits))]
    for index in range(len(digits)):
        candidates[index].pop(index)
    return max(int(''.join(candidate)) for candidate in candidates)
# Run the docstring examples when executed as a script.
if __name__ == "__main__":
    __import__('''doctest''').testmod()
| 652 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[Any] = WavaVecaPhonemeCTCTokenizer
_UpperCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
_snake_case : Union[str, Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_snake_case : Any = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=False , lowerCamelCase_ : Union[str, Any]=20 , lowerCamelCase_ : Tuple=5 ):
'''simple docstring'''
_snake_case : Union[str, Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_ )) for i in range(len(lowerCamelCase_ ) )]
_snake_case : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCamelCase_ ) , lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
_snake_case : Optional[Any] = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
_snake_case : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Any = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
_snake_case : List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = ' ' + output_txt
_snake_case : Any = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
_snake_case : Optional[int] = tokenizer('m xxx ɪ' , do_phonemize=lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
_snake_case : Any = tokenizer('m aaa ɪ ccc' , do_phonemize=lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
_snake_case : List[str] = tokenizer('maɪ c' , do_phonemize=lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , [3, 2_00] ) # mai should be <unk> (=3)
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Optional[Any] = 'Hello how are you'
_snake_case : int = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(lowerCamelCase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Tuple = 'Hello how are you'
_snake_case : List[Any] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowerCamelCase_ ).input_ids , tokenizer(lowerCamelCase_ , do_phonemize=lowerCamelCase_ ).input_ids )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Union[str, Any] = 'Hello how are you'
_snake_case : List[str] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
_snake_case : int = tokenizer.decode(tokenizer(lowerCamelCase_ ).input_ids )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_snake_case : int = tokenizer.decode(sample_ids[0] )
_snake_case : Dict = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch_tokens[0] )
self.assertEqual(lowerCamelCase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Any = 'Hello how are you'
_snake_case : int = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(lowerCamelCase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Optional[Any] = 'Hello how are you'
_snake_case : str = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowerCamelCase_ ).input_ids , tokenizer(lowerCamelCase_ , do_phonemize=lowerCamelCase_ ).input_ids )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
_snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_snake_case : Tuple = tokenizer.decode(sample_ids[0] )
_snake_case : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch_tokens[0] )
self.assertEqual(lowerCamelCase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
_snake_case : Optional[int] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCamelCase_ )
_snake_case : str = tokenizer.batch_decode(lowerCamelCase_ , filter_word_delimiter_token=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch_tokens[0] )
self.assertEqual(lowerCamelCase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Union[str, Any] = 'Hello how are you'
_snake_case : List[Any] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
_snake_case : str = tokenizer.decode(tokenizer(lowerCamelCase_ ).input_ids , filter_word_delimiter_token=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Any = 'Hello how are you'
_snake_case : Optional[int] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
_snake_case : Optional[int] = tokenizer.decode(tokenizer(lowerCamelCase_ ).input_ids , filter_word_delimiter_token=lowerCamelCase_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowerCamelCase_ )
_snake_case : Optional[int] = 'Hello how are you'
_snake_case : Optional[Any] = tokenizer(lowerCamelCase_ , phonemizer_lang='en-us' ).input_ids
_snake_case : Any = tokenizer(lowerCamelCase_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Any = tokenizer.decode(lowerCamelCase_ )
_snake_case : Union[str, Any] = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(lowerCamelCase_ , 'ɛ l o h aʊ a ʁ j u' )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Tuple = 'Hello how Are you'
_snake_case : Any = 'hello how are you'
_snake_case : Dict = tokenizer(lowerCamelCase_ ).input_ids
_snake_case : Optional[Any] = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
_snake_case : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
_snake_case : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : str = [d[key] for d in offsets]
return retrieved_list
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_snake_case : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_snake_case : Any = tokenizer.decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ , filter_word_delimiter_token=lowerCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertTrue(isinstance(outputs_list[0] , lowerCamelCase_ ) )
# transform list to ModelOutput
_snake_case : Dict = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(lowerCamelCase_ : Dict , lowerCamelCase_ : int ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
[recursive_check(lowerCamelCase_ , lowerCamelCase_ ) for la, la in zip(lowerCamelCase_ , lowerCamelCase_ )]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
_snake_case : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_snake_case : Tuple = tokenizer.batch_decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ )
_snake_case : Union[str, Any] = [tokenizer.decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ ) for ids in sample_ids]
check_list_tuples_equal(lowerCamelCase_ , lowerCamelCase_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case : int = tokenizer.vocab_size
_snake_case : Union[str, Any] = len(lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_snake_case : Optional[int] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_snake_case : List[str] = tokenizer.add_tokens(lowerCamelCase_ )
_snake_case : Any = tokenizer.vocab_size
_snake_case : Dict = len(lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , 0 )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , len(lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , all_size + len(lowerCamelCase_ ) )
_snake_case : List[str] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowerCamelCase_ )
self.assertGreaterEqual(len(lowerCamelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_snake_case : List[str] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_snake_case : int = tokenizer.add_special_tokens(lowerCamelCase_ )
_snake_case : List[str] = tokenizer.vocab_size
_snake_case : str = len(lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , 0 )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , len(lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , all_size_a + len(lowerCamelCase_ ) )
_snake_case : Dict = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowerCamelCase_ )
self.assertGreaterEqual(len(lowerCamelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
    def __UpperCAmelCase ( self : Optional[int] ):
        """Intentionally skipped: this tokenizer is decode-only, so encoding input IDs is not tested."""
        pass
    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
    def __UpperCAmelCase ( self : int ):
        """Intentionally skipped: this tokenizer is decode-only, so encoding input IDs is not tested."""
        pass
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case : int = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
_snake_case : Union[str, Any] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(output['text'] , lowerCamelCase_ )
| 652 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
    """NER variant of the token-classification task (CoNLL-2003 style files).

    NOTE(review): the three task methods below all share the scrambled name
    ``__UpperCAmelCase`` (later bindings shadow earlier ones); names kept
    as-is to preserve the file's interface.  The duplicate parameter names of
    the scrambled original (a SyntaxError) and the undefined local references
    are fixed using the names the bodies themselves read.
    """

    def __init__( self : Dict , label_idx : int = -1 ):
        """Remember which whitespace-separated column carries the tag (-1 = last)."""
        self.label_idx = label_idx

    def __UpperCAmelCase ( self : Any , data_dir , mode ):
        """Read ``<data_dir>/<mode>.txt`` and return a list of InputExample objects."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                # Blank lines / document markers close the current sentence.
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples

    def __UpperCAmelCase ( self : Union[str, Any] , writer , test_input_reader , preds_list ):
        """Write predictions next to the input tokens, consuming ``preds_list`` in order."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def __UpperCAmelCase ( self : List[str] , path ):
        """Return the label list from ``path`` (ensuring 'O' is present) or the CoNLL defaults."""
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
    """Chunking variant of the NER task: the label lives in the second-to-last column."""

    def __init__( self : Optional[int] ):
        """Use column -2 for labels (CoNLL-2000 chunking layout)."""
        super().__init__(label_idx=-2 )

    def __UpperCAmelCase ( self : Optional[int] , path ):
        """Return the label list from ``path`` (ensuring 'O' is present) or the chunking defaults.

        NOTE(review): the scrambled original read the file into a throwaway
        local but then tested/returned ``labels`` (a NameError); fixed.
        """
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowercase ( a_ ):
    """POS-tagging variant of the token-classification task, reading CoNLL-U via ``parse_incr``.

    NOTE(review): the scrambled original declared duplicate parameter names
    (a SyntaxError) and referenced locals it never bound; both are restored
    from the names the method bodies actually use.
    """

    def __UpperCAmelCase ( self : Optional[Any] , data_dir , mode ):
        """Read ``<data_dir>/<mode>.txt`` (CoNLL-U) and return InputExample objects."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def __UpperCAmelCase ( self : Optional[int] , writer , test_input_reader , preds_list ):
        """Write one ``form (upos|prediction)`` line per sentence to ``writer``."""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out )
            example_id += 1

    def __UpperCAmelCase ( self : Optional[Any] , path ):
        """Return labels from ``path`` or the 17 universal POS tags."""
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 652 | 1 |
from math import factorial
def A__( __lowerCAmelCase = 20 ):
    """Return the central binomial coefficient C(2n, n) for ``n = __lowerCAmelCase``.

    This counts the lattice paths through an n x n grid (Project Euler 15).
    The scrambled original bound both intermediates to a throwaway name and
    then read undefined ``n``/``k``; restored.
    """
    n = 2 * __lowerCAmelCase  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
    import sys

    # NOTE(review): the original printed ``solution(...)`` and read an unbound
    # ``n`` — both NameErrors; the function above is bound to ``A__``.
    if len(sys.argv) == 1:
        print(A__(20))
    else:
        try:
            n = int(sys.argv[1])
            print(A__(n))
        except ValueError:
            print('''Invalid entry - please enter a number.''')
| 652 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Fast CPU smoke tests for the safe Stable Diffusion pipeline.

    NOTE(review): this block was machine-scrambled — locals were renamed to a
    throwaway, keyword values replaced by an undefined name, and the helper
    properties collapsed onto one name.  The bindings below restore the names
    the surrounding code actually references; keyword values (``False``,
    ``None``, ``True``) follow the upstream diffusers tests — confirm.
    """

    def __UpperCAmelCase ( self : Optional[Any] ):
        """Release Python and CUDA memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image( self : Optional[int] ):
        """Random (1, 3, 32, 32) image batch on the test device."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image

    @property
    def dummy_cond_unet( self : str ):
        """Tiny conditional UNet for fast tests."""
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model

    @property
    def dummy_vae( self : Dict ):
        """Tiny autoencoder for fast tests."""
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model

    @property
    def dummy_text_encoder( self : Tuple ):
        """Tiny CLIP text encoder for fast tests."""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )

    @property
    def dummy_extractor( self : Optional[Any] ):
        """Stand-in feature extractor returning an empty pixel-value holder."""

        def extract(*args : Optional[int] , **kwargs : str ):
            # The scrambled original used the same name for * and ** params
            # (a SyntaxError) and named this class after the outer class
            # while returning ``Out()``; both fixed.
            class Out:
                def __init__( self : Tuple ):
                    self.pixel_values = torch.ones([0] )

                def to( self : int , device : Tuple ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract

    def __UpperCAmelCase ( self : int ):
        """Smoke test with a DDIM scheduler; tuple output must match dict output."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : List[str] ):
        """Same smoke test with a PNDM scheduler."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        """The pipeline must load, run and round-trip save/load with ``safety_checker=None``."""
        pipe = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=None )
        assert isinstance(pipe , StableDiffusionPipeline )
        assert isinstance(pipe.scheduler , LMSDiscreteScheduler )
        assert pipe.safety_checker is None
        image = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def __UpperCAmelCase ( self : Optional[Any] ):
        """fp16 variant of the smoke test (GPU only)."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        image = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Nightly GPU tests for safe latent diffusion guidance (SLD) parameters.

    NOTE(review): local names restored from a machine-scrambled original
    (every result was bound to a throwaway name while later lines read
    ``sd_pipe``/``output``/``image`` etc.); the exact expected slices are
    kept byte-for-byte.
    """

    def __UpperCAmelCase ( self : Tuple ):
        """Release Python and CUDA memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : List[str] ):
        """SLD off vs. strong SLD on a moody-portrait prompt."""
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=None )
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        seed = 40_03_66_03_46
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        """SLD off vs. strong SLD on a safe-for-work prompt."""
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=None )
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'padme amidala taking a bath artwork, safe for work, no nudity'
        seed = 27_34_97_17_55
        guidance_scale = 7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : Tuple ):
        """With the default safety checker: SLD off blacks out the image, strong SLD recovers it."""
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        seed = 10_44_35_52_34
        guidance_scale = 12
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__( ):
    """Parse the CLI options of the image-generation script and return the namespace.

    NOTE(review): the scrambled original passed an undefined name as the
    ``type``/``default``/``required`` values; restored to ``str``/``int``
    with the defaults the help strings imply — confirm against the original.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def A__( imgs , rows , cols ):
    """Paste ``rows * cols`` equally-sized PIL images into a single grid image.

    Raises ValueError when the number of images does not match the grid.
    NOTE(review): the scrambled original declared three identical parameter
    names (a SyntaxError); restored from the names the body reads.
    """
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )
    w, h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def A__( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Generate ``num_images_per_prompt`` images with *pipeline* and arrange them on a grid.

    Returns ``(grid, images)``.  NOTE(review): the scrambled original declared
    six identical parameter names (a SyntaxError); names restored from the
    body's own references and the defaults kept from the source.
    ``image_grid`` is expected to be the grid helper defined in this file.
    """
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
# NOTE(review): this section looks machine-scrambled — every result is bound
# to the throwaway name ``lowercase_`` while later lines read the intended
# names (``args``, ``tokenizer``, ``text_encoder``, ``vae``, ``unet``,
# ``pipeline``, ``grid``, ``images``, ``dirname``), and ``parse_args`` /
# ``generate_images`` are not defined in this file (the functions above are
# all named ``A__``).  As written this script raises NameError; restore the
# original bindings before running it.
lowercase_ : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : Dict = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
lowercase_ : Tuple = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
lowercase_ : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
lowercase_ : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
lowercase_ : Tuple = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Replace the safety checker with a pass-through (never flags NSFW).
lowercase_ : List[Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    # A neural-compressor checkpoint exists: load the quantized/tuned UNet.
    lowercase_ : str = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    lowercase_ : Optional[Any] = unet.to(torch.device('''cuda''', args.cuda_id))
lowercase_ : List[str] = pipeline.to(unet.device)
lowercase_ , lowercase_ : Any = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the composed grid plus one file per generated image.
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
lowercase_ : List[str] = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 652 |
import functools
def A__( days , costs ):
    """Minimum cost of travel passes covering every day in ``days`` (LeetCode 983).

    ``costs`` holds the prices of the 1-day, 7-day and 30-day passes.  Days
    must be positive integers below 366.  NOTE(review): the scrambled
    original declared duplicate parameter names (a SyntaxError) and called
    ``isinstance(x, x)``; restored to the conventional validation.
    """
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        # Cheapest cost to cover all travel days from ``index`` onwards.
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
    # Run any embedded doctests when the module is executed directly.
    from doctest import testmod

    testmod()
| 652 | 1 |
import math
def A__( number ):
    """Trial-division primality test using the 6k +/- 1 optimisation.

    NOTE(review): the scrambled original's body read ``number`` while the
    parameter had a different name (a NameError); the parameter is renamed
    to match the body.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def _is_prime(number ):
    """Trial-division primality test (6k +/- 1 optimisation) used by the nth-prime search."""
    if 1 < number < 4:
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A__( __lowerCAmelCase = 1_00_01 ):
    """Return the nth prime number (Project Euler 7; default: the 10001st).

    Raises TypeError when ``nth`` cannot be cast to int and ValueError when
    it is not positive.  NOTE(review): the scrambled original read unbound
    names (``nth``, ``num``, ``primes``) and called an undefined
    ``is_prime``; a private ``_is_prime`` helper is provided above.
    """
    try:
        nth = int(__lowerCAmelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if _is_prime(num ):
            primes.append(num )
        num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
    # NOTE(review): the original printed ``solution()``, a name that does not
    # exist in this file; the nth-prime function above is bound to ``A__``.
    print(F'''{A__() = }''')
| 652 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
    """Deprecated alias kept for backward compatibility with SegformerFeatureExtractor."""

    def __init__( self : int , *args : str , **kwargs : Tuple ):
        """Emit the deprecation warning, then defer to the image-processor base class.

        NOTE(review): the scrambled original declared duplicate ``*``/``**``
        parameter names (a SyntaxError) and passed the args tuple where the
        warning category belongs; ``FutureWarning`` restored.
        """
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 652 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
# Accepted spellings, matching ``distutils.util.strtobool`` (inlined because
# distutils was removed in Python 3.12 — PEP 632).
_TRUTHY = {'y', 'yes', 't', 'true', 'on', '1'}
_FALSY = {'n', 'no', 'f', 'false', 'off', '0'}


def A__( key , default=False ):
    """Read boolean-ish environment variable ``key``.

    Returns 1/0 when the variable is set to a recognised value, ``default``
    (unconverted) when it is unset, and raises ValueError otherwise.
    NOTE(review): the scrambled original declared duplicate parameter names
    (a SyntaxError) and returned an unbound ``_value``; restored.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        lowered = value.lower()
        if lowered in _TRUTHY:
            _value = 1
        elif lowered in _FALSY:
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
# NOTE(review): the env-flag helper above is (mis)named ``A__`` in this file;
# the original call target ``parse_flag_from_env`` is undefined here.
lowercase_ : str = A__('''RUN_SLOW''', default=False)
def A__( __lowerCAmelCase ):
    """Decorator: unconditionally skip the wrapped test."""
    always_skip = unittest.skip('Test was skipped' )
    return always_skip(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless slow tests were requested (RUN_SLOW env flag)."""
    slow_gate = unittest.skipUnless(_run_slow_tests , 'test is slow' )
    return slow_gate(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless the run is CPU-only (no CUDA device present)."""
    cpu_only = not torch.cuda.is_available()
    return unittest.skipUnless(cpu_only , 'test requires only a CPU' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless a CUDA device is available."""
    has_cuda = torch.cuda.is_available()
    return unittest.skipUnless(has_cuda , 'test requires a GPU' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless an XPU device is available."""
    has_xpu = is_xpu_available()
    return unittest.skipUnless(has_xpu , 'test requires a XPU' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless torch has MPS backend support."""
    has_mps = is_mps_available()
    return unittest.skipUnless(has_mps , 'test requires a `mps` backend support in `torch`' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless both transformers and datasets are importable."""
    suite_ready = is_transformers_available() and is_datasets_available()
    return unittest.skipUnless(suite_ready , 'test requires the Hugging Face suite' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless bitsandbytes is importable."""
    has_bnb = is_bnb_available()
    return unittest.skipUnless(has_bnb , 'test requires the bitsandbytes library' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless a TPU is available."""
    has_tpu = is_tpu_available()
    return unittest.skipUnless(has_tpu , 'test requires TPU' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless exactly one CUDA device is present."""
    single_gpu = torch.cuda.device_count() == 1
    return unittest.skipUnless(single_gpu , 'test requires a GPU' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless exactly one XPU device is present."""
    single_xpu = torch.xpu.device_count() == 1
    return unittest.skipUnless(single_xpu , 'test requires a XPU' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless more than one CUDA device is present."""
    multi_gpu = torch.cuda.device_count() > 1
    return unittest.skipUnless(multi_gpu , 'test requires multiple GPUs' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless more than one XPU device is present."""
    multi_xpu = torch.xpu.device_count() > 1
    return unittest.skipUnless(multi_xpu , 'test requires multiple XPUs' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless safetensors is importable."""
    has_safetensors = is_safetensors_available()
    return unittest.skipUnless(has_safetensors , 'test requires safetensors' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless DeepSpeed is importable."""
    has_deepspeed = is_deepspeed_available()
    return unittest.skipUnless(has_deepspeed , 'test requires DeepSpeed' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless the installed torch is at least 1.12.0."""
    new_enough = is_torch_version('>=' , '1.12.0' )
    return unittest.skipUnless(new_enough , 'test requires torch version >= 1.12.0' )(__lowerCAmelCase )
def A__( test_case=None , version=None ):
    """Skip ``test_case`` unless the installed torch is at least ``version``.

    Usable bare or parameterised: calling with only ``version`` returns a
    partial so the decorator can be applied later.  NOTE(review): the
    scrambled original declared two identical parameter names (a
    SyntaxError); names restored from the body's references.
    """
    if test_case is None:
        return partial(A__ , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , f'''test requires torch version >= {version}''' )(test_case )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless TensorBoard is importable."""
    has_tensorboard = is_tensorboard_available()
    return unittest.skipUnless(has_tensorboard , 'test requires Tensorboard' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless wandb is importable."""
    has_wandb = is_wandb_available()
    return unittest.skipUnless(has_wandb , 'test requires wandb' )(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
    """Decorator: skip unless comet_ml is importable."""
    has_comet = is_comet_ml_available()
    return unittest.skipUnless(has_comet , 'test requires comet_ml' )(__lowerCAmelCase )
# True when a non-comet tracker (wandb or tensorboard) is importable while
# comet_ml is not.  NOTE(review): downstream code reads this flag under the
# name ``_atleast_one_tracker_available``; here it is bound to ``lowercase_``.
lowercase_ : Optional[Any] = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A__( __lowerCAmelCase ):
    """Decorator: skip unless a non-comet tracker is available and comet_ml is absent."""
    reason = 'test requires at least one tracker to be available and for `comet_ml` to not be installed'
    return unittest.skipUnless(_atleast_one_tracker_available , reason )(__lowerCAmelCase )
class lowercase ( unittest.TestCase ):
    """TestCase backed by a class-level temporary directory.

    NOTE(review): this block looks machine-scrambled — the three methods
    below all share the name ``__UpperCAmelCase`` (only the last binding
    survives), the ``mkdtemp`` result is bound to a throwaway local instead
    of ``cls.tmpdir``, and ``self.clear_on_setup`` / ``cls.tmpdir`` are never
    actually set.  Restore the original ``setUpClass`` / ``tearDownClass`` /
    ``setUp`` names and attribute assignments before relying on this class.
    """

    # Presumably the original ``clear_on_setup`` flag — TODO confirm.
    _UpperCamelCase : List[Any] = True

    @classmethod
    def __UpperCAmelCase ( cls : List[Any] ):
        """Intended ``setUpClass``: create the shared temporary directory."""
        # NOTE(review): should presumably be ``cls.tmpdir = tempfile.mkdtemp()``.
        _snake_case : str = tempfile.mkdtemp()

    @classmethod
    def __UpperCAmelCase ( cls : int ):
        """Intended ``tearDownClass``: remove the shared temporary directory."""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def __UpperCAmelCase ( self : Any ):
        """Intended ``setUp``: empty the tmpdir between tests when clearing is on."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('**/*' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    # NOTE(review): should presumably be ``shutil.rmtree(path)``.
                    shutil.rmtree(lowerCamelCase_ )
class lowercase ( unittest.TestCase ):
    """TestCase that resets the global accelerator singletons after each test."""

    def __UpperCAmelCase ( self : str ):
        """Tear down, then reset the AcceleratorState/PartialState singletons."""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        for state_singleton in (AcceleratorState, PartialState):
            state_singleton._reset_state()
class lowercase ( unittest.TestCase ):
    """TestCase helper that starts a batch of ``mock.patch`` objects and
    registers their ``stop`` callbacks for automatic cleanup."""

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Union[mock.Mock, List[mock.Mock]] ):
        '''Start every given mock (single or list) and stop them at teardown.'''
        # Must be stored on the instance: the loop below reads ``self.mocks``.
        # The original bound a throwaway local, raising AttributeError here.
        self.mocks = lowerCamelCase_ if isinstance(lowerCamelCase_ , (tuple, list) ) else [lowerCamelCase_]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def A__( __lowerCAmelCase ):
    """Return True iff *tensor* holds the same values on every distributed process.

    The original bound ``AcceleratorState()`` and the gathered tensor to a
    throwaway local while reading ``state`` / ``tensors``, which are undefined.
    """
    state = AcceleratorState()
    tensor = __lowerCAmelCase[None].clone().to(state.device )
    # Gather one copy per process; row i is process i's view of the tensor.
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class lowercase :
    """Result of a finished subprocess: exit code plus captured output lines.

    The original ``__init__`` repeated one parameter name (SyntaxError) and
    bound the values to locals, so ``result.returncode`` etc. never existed.
    """

    def __init__( self , returncode , stdout , stderr ):
        '''Store the exit code and the captured stdout / stderr line lists.'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def A__( stream , callback ):
    """Forward every line read from *stream* to *callback* until EOF.

    The original repeated one parameter name (SyntaxError) and read an
    undefined ``line`` local.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def A__( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run *cmd* as an asyncio subprocess, teeing stdout/stderr line by line.

    Returns a ``_RunOutput`` carrying the exit code and the captured lines.
    The original signature repeated one parameter name (SyntaxError) and read
    the undefined locals ``p`` / ``out`` / ``err``.
    """
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Decode, record, and (unless quiet) echo one line of child output.
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    # NOTE(review): `_read_stream` / `_RunOutput` are not defined under these
    # names in this file (obfuscated siblings) — confirm the intended targets.
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def A__( cmd , env=None , stdin=None , timeout=1_80 , quiet=False , echo=True ):
    """Synchronously run *cmd* via the async streamer; raise on non-zero exit.

    The original signature repeated one parameter name (SyntaxError) and read
    the undefined locals ``loop`` / ``result`` / ``cmd_str`` / ``stderr``.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )

    return result
class lowercase ( a_ ):
    """Exception type used by the subprocess helpers in this module.

    NOTE(review): base class ``a_`` is defined elsewhere in the project,
    presumably an ``Exception`` subclass — confirm.
    """
    pass
def A__( command , return_stdout=False ):
    """Run *command* with ``subprocess.check_output``.

    Returns decoded stdout when *return_stdout* is True, otherwise None.
    Raises SubprocessCallException with the child's combined output on failure.
    The original repeated one parameter name (SyntaxError) and read the
    undefined local ``output``.
    """
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 652 |
from math import factorial
def A__( successes , trials , prob ):
    """Binomial PMF: probability of exactly *successes* hits in *trials*
    Bernoulli draws with per-draw success probability *prob*.

    Raises ValueError for invalid inputs.  The original repeated the single
    obfuscated parameter name three times (SyntaxError) and read the
    undefined locals ``probability`` / ``coefficient``.
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! * (n-k)!)
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient


# Public alias used by the demo in ``__main__`` below.
binomial_distribution = A__
if __name__ == "__main__":
    from doctest import testmod

    # Run any doctests defined in this module.
    testmod()
    # Demo: P(X = 2) for 4 trials with success probability 0.75.
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # NOTE(review): `binomial_distribution` is not defined under that name in
    # this file (the PMF above is named `A__`) — confirm the intended target.
    print(binomial_distribution(2, 4, 0.75))
| 652 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowercase_ : Union[str, Any] = logging.get_logger(__name__)

# File names expected in a saved tokenizer directory.
lowercase_ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Hub URLs for the vocab / tokenizer files of each published checkpoint.
lowercase_ : Tuple = {
    '''vocab_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
        ),
        '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum accepted input length (positions) per checkpoint.
lowercase_ : Optional[Any] = {
    '''squeezebert/squeezebert-uncased''': 512,
    '''squeezebert/squeezebert-mnli''': 512,
    '''squeezebert/squeezebert-mnli-headless''': 512,
}

# Per-checkpoint tokenizer init overrides.
lowercase_ : int = {
    '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class lowercase ( a_ ):
    """Fast (Rust-backed) SqueezeBERT tokenizer wrapping a WordPiece
    ``tokenizers`` model.

    Fixes vs. the original: every method signature repeated one obfuscated
    parameter name (SyntaxError), the five class attributes all shared one
    name (only the last survived), the three helper methods shadowed each
    other, and locals (``normalizer_state``, ``output``, ``sep`` …) were
    bound to a throwaway name while being read under their real names.
    """

    # Names read by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''Build the fast tokenizer and re-sync the backend normalizer with
        the requested lowercasing / accent-stripping options.'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer with the requested options.
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''Wrap one sequence (or a pair) with [CLS] / [SEP] special tokens.'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        '''Segment-id mask: zeros for the first sequence, ones for the second.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        '''Write the WordPiece vocab to *save_directory*; return the file paths.'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 652 |
# Install snippet injected at the top of auto-generated doc notebooks
# (Italian translation of the docs; the string content is user-facing and
# must stay as-is).
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# Notebook cells prepended to every generated notebook.
# NOTE(review): `INSTALL_CONTENT` is not defined under that name here (the
# snippet above is bound to `lowercase_`) — confirm the intended reference.
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Placeholder substitutions applied to doc templates.
lowercase_ : str = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 652 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase_ : Union[str, Any] = logging.get_logger(__name__)

# Canonical config URLs for the published T5 checkpoints.
lowercase_ : str = {
    '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
    '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
    '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
    '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
    '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class lowercase ( a_ ):
    """T5 model configuration: vocabulary/model dimensions, attention
    geometry, and feed-forward activation setup.

    Fixes vs. the original: ``__init__`` repeated one obfuscated parameter
    name for every argument (SyntaxError), no value was ever stored on
    ``self`` (all were bound to a throwaway local), and the three class
    attributes shared a single name so two of them were shadowed.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self , vocab_size=3_21_28 , d_model=5_12 , d_kv=64 , d_ff=20_48 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''Store the hyper-parameters and derive the dense activation setup.'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder depth defaults to the encoder depth (symmetric stack).
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class lowercase ( a_ ):
    """ONNX export config for T5: declares the dynamic input axes and the
    default opset.

    Fixes vs. the original: both properties shared one name (the second
    shadowed the first) and the inputs mapping was built into a throwaway
    local while the undefined ``common_inputs`` was returned.
    """

    @property
    def inputs( self ):
        '''Mapping from input name to its dynamic-axis labels.'''
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With a KV cache the encoder mask covers past + current tokens and
            # the decoder consumes one new token per step.
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )

        return common_inputs

    @property
    def default_onnx_opset( self ):
        '''Minimum ONNX opset required for the export.'''
        return 13
| 652 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
# Name the lazy loader at the bottom of the module actually reads; the
# original never defined it (NameError at import time).
_import_structure = lowercase_

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer registered for RoCBert here.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Tuple = [
        '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoCBertForCausalLM''',
        '''RoCBertForMaskedLM''',
        '''RoCBertForMultipleChoice''',
        '''RoCBertForPreTraining''',
        '''RoCBertForQuestionAnswering''',
        '''RoCBertForSequenceClassification''',
        '''RoCBertForTokenClassification''',
        '''RoCBertLayer''',
        '''RoCBertModel''',
        '''RoCBertPreTrainedModel''',
        '''load_tf_weights_in_roc_bert''',
    ]
    # Register the torch-only symbols (the original only bound the list to a
    # fresh name, so the lazy module never exposed them).
    _import_structure['''modeling_roc_bert'''] = lowercase_

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUG FIX: the original re-raised OptionalDependencyNotAvailable here,
        # i.e. precisely when `tokenizers` *is* available, outside any handler.
        # Nothing extra needs importing in that case.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class lowercase :
        """Stand-in so this test module still imports when PIL is missing."""

        @staticmethod
        def __UpperCAmelCase ( *args : Any , **kwargs : List[Any] ):
            '''No-op placeholder (presumably for ``Image.open`` — confirm).

            The original used the same identifier for both ``*`` and ``**``
            parameters, which is a SyntaxError.
            '''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
    """Pipeline tests for the ``object-detection`` task.

    Fixes vs. the original: every multi-argument method repeated one
    obfuscated parameter name (SyntaxError), all methods shared a single
    name so only the last survived, and locals were bound to a throwaway
    name while being read under their real names.
    """

    # Name read by the shared pipeline-test machinery.
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline( self , model , tokenizer , processor ):
        '''Build a pipeline for *model* plus one sample image path.'''
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test( self , object_detector , examples ):
        '''Exercise the pipeline on a single image and on a mixed-format batch.'''
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )

        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                } , )

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )

        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )

        self.assertEqual(len(batch ) , len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        'score': ANY(float ),
                        'label': ANY(str ),
                        'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                    } , )

    @require_tf
    @unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf( self ):
        '''Placeholder: the task has no TF implementation.'''
        pass

    @require_torch
    def test_small_model_pt( self ):
        '''Tiny random DETR checkpoint yields deterministic boxes at threshold 0.'''
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )

        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
            ] , )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] , threshold=0.0 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
                ],
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
                ],
            ] , )

    @require_torch
    @slow
    def test_large_model_pt( self ):
        '''Full DETR-ResNet-50 checkpoint: single-image and batch detections.'''
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
            ] , )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
                ],
            ] , )

    @require_torch
    @slow
    def test_integration_torch_object_detection( self ):
        '''Same checks as above, going through the ``pipeline`` factory.'''
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection' , model=model_id )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
            ] , )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
                ],
            ] , )

    @require_torch
    @slow
    def test_threshold( self ):
        '''A high threshold keeps only the two confident cat detections.'''
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection' , model=model_id )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
            ] , )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm( self ):
        '''A LayoutLMv3 document model also runs through this pipeline.'''
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993

        object_detector = pipeline('object-detection' , model=model_id , threshold=threshold )
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
            ] , )
| 652 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
# Name the lazy loader at the bottom of the module actually reads; the
# original never defined it (NameError at import time).
_import_structure = lowercase_

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Any = [
        '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegaForCausalLM''',
        '''MegaForMaskedLM''',
        '''MegaForMultipleChoice''',
        '''MegaForQuestionAnswering''',
        '''MegaForSequenceClassification''',
        '''MegaForTokenClassification''',
        '''MegaModel''',
        '''MegaPreTrainedModel''',
    ]
    # Register the torch-only symbols (the original only bound the list to a
    # fresh name, so the lazy module never exposed them).
    _import_structure['''modeling_mega'''] = lowercase_

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
from scipy.stats import pearsonr
import datasets
lowercase_ : List[Any] = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
lowercase_ : Union[str, Any] = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
lowercase_ : int = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """Pearson correlation metric backed by ``scipy.stats.pearsonr``.

    Fixes vs. the original: both methods shared one name (the second
    shadowed the first, breaking the ``datasets.Metric`` contract),
    ``_compute`` repeated one obfuscated parameter name (SyntaxError), and
    the ``pearsonr`` result was bound to a throwaway local while the
    undefined ``results`` was read.
    """

    def _info( self ):
        '''Declare the metric's input features and reference documentation.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('float' ),
                    'references': datasets.Value('float' ),
                } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )

    def _compute( self , predictions , references , return_pvalue=False ):
        '''Return the correlation coefficient, plus the p-value when requested.'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 652 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Apply the ``integration`` pytest marker to this module's tests.
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( path , tmp_path ):
    """inspect_dataset must copy the loading script into *tmp_path* with no cache.

    The original repeated one parameter name (SyntaxError) and read the
    undefined local ``script_name``.
    """
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( path , tmp_path ):
    """inspect_metric must copy the metric script into *tmp_path* with no cache.

    The original repeated one parameter name (SyntaxError) and read the
    undefined local ``script_name``.
    """
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def A__(path, config_name, expected_splits):
    """`get_dataset_config_info` reports the requested config name and its splits.

    Parameter names now match the parametrize argnames (pytest injects
    arguments by name); `info` was previously read without being bound.
    """
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def A__(path, config_name, expected_exception):
    """A missing config name for a multi-config dataset raises `expected_exception`.

    Parameter names now match the parametrize argnames; the original repeated
    one name three times, which is a SyntaxError.
    """
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    'path, expected',
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ],
)
def A__(path, expected):
    """`get_dataset_config_names` includes the expected config for each dataset.

    `config_names` was previously read without being bound.
    """
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config',
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ],
)
def A__(path, expected_configs, expected_splits_in_first_config):
    """`get_dataset_infos` lists all configs; the first config exposes its splits.

    Parameter names now match the parametrize argnames, and the locals
    `infos`/`expected_config`/`info` are bound before use.
    """
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def A__(path, expected_config, expected_splits):
    """A single expected config appears in `get_dataset_infos` with the right splits.

    Parameter names now match the parametrize argnames; `infos`/`info` are
    bound before use.
    """
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def A__(path, config_name, expected_exception):
    """A missing config name makes `get_dataset_split_names` raise `expected_exception`."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 652 | 1 |
from __future__ import annotations
def find_max(nums, left, right):
    """Return the maximum of nums[left:right + 1] via divide and conquer.

    Renamed from the scrambled `A__`: the recursive calls in the body already
    use `find_max`, the three identically-named parameters were a SyntaxError,
    and `mid`/`left_max`/`right_max` were read without ever being bound.

    Raises:
        ValueError: if `nums` is empty.
        IndexError: if `left` or `right` is outside the valid (possibly
            negative) index range of `nums`.
    """
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
# Script entry point: run the module's doctests (none are defined above, so
# this only verifies the module imports cleanly).
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
| 652 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
# Emit conversion progress from the transformers library at INFO level.
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``state_dict`` file.

    Renamed from the scrambled `A__` to match the call in the CLI entry point
    below; the three identically-named parameters were a SyntaxError and the
    locals `config`/`model` were read without being bound.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON file describing the model architecture.
        pytorch_dump_path: Destination file for the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Build the CLI. The scrambled original assigned the parser and the parsed
    # args to throwaway names while reading `parser`/`args`, raising NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    """Processor tests for MGP-STR (char tokenizer + ViT image processor).

    NOTE(review): this block appears machine-scrambled — locals are assigned
    to `_snake_case` while later lines read the intended names
    (`self.tmpdirname`, `tokenizer`, `processor`, ...), helper methods are all
    named `__UpperCAmelCase` although callers use `self.get_tokenizer()` etc.,
    and `np.uinta` is presumably `np.uint8`. Code is kept byte-identical;
    comments describe apparent intent — confirm against the upstream test
    suite before relying on them.
    """
    # Image-processor class under test (None when vision deps are unavailable).
    _UpperCamelCase : List[Any] = ViTImageProcessor if is_vision_available() else None
    @property
    def __UpperCAmelCase ( self : Tuple ):
        '''Proxy to the image-processor tester's config dict.

        NOTE(review): `self.image_processor_tester` is never assigned in this
        class — presumably lost in the scramble.
        '''
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCAmelCase ( self : List[str] ):
        '''setUp-style fixture: write a char vocab and a ViT image-processor config to a tmp dir.'''
        _snake_case : str = (3, 32, 1_28)  # (channels, height, width) — unused as written
        _snake_case : Dict = tempfile.mkdtemp()  # presumably meant for self.tmpdirname — TODO confirm
        # fmt: off
        _snake_case : Dict = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        _snake_case : List[str] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
        _snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
        # Minimal ViT image-processor configuration written alongside the vocab.
        _snake_case : Union[str, Any] = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 1_28},
        }
        _snake_case : Optional[int] = os.path.join(self.tmpdirname , lowerCamelCase_ )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : Dict , **lowerCamelCase_ : Tuple ):
        '''Reload the tokenizer saved by the fixture above (kwargs forwarded).'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
    def __UpperCAmelCase ( self : Dict , **lowerCamelCase_ : Any ):
        '''Reload the image processor saved by the fixture above (kwargs forwarded).'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
    def __UpperCAmelCase ( self : Any ):
        '''tearDown-style cleanup: remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname )
    def __UpperCAmelCase ( self : str ):
        '''Build one random RGB PIL image (30x400) to feed the processor.'''
        _snake_case : str = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
        _snake_case : Union[str, Any] = Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) )
        return image_input
    def __UpperCAmelCase ( self : List[Any] ):
        '''Round-trip: save the processor, reload it, components should be unchanged.'''
        _snake_case : List[str] = self.get_tokenizer()
        _snake_case : Any = self.get_image_processor()
        _snake_case : str = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
        _snake_case : Optional[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
    def __UpperCAmelCase ( self : str ):
        '''Round-trip with overriding kwargs (special tokens, normalization) applied on reload.'''
        _snake_case : List[Any] = self.get_tokenizer()
        _snake_case : str = self.get_image_processor()
        _snake_case : Any = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
        _snake_case : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        _snake_case : Tuple = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
        _snake_case : Optional[Any] = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
    def __UpperCAmelCase ( self : List[str] ):
        '''Processor(images=...) should match calling the image processor directly.'''
        _snake_case : Optional[int] = self.get_image_processor()
        _snake_case : int = self.get_tokenizer()
        _snake_case : Any = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Tuple = image_processor(lowerCamelCase_ , return_tensors='np' )
        _snake_case : List[str] = processor(images=lowerCamelCase_ , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def __UpperCAmelCase ( self : Tuple ):
        '''Processor(text=...) should match calling the tokenizer directly.'''
        _snake_case : Union[str, Any] = self.get_image_processor()
        _snake_case : Optional[int] = self.get_tokenizer()
        _snake_case : int = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _snake_case : str = 'test'
        _snake_case : Any = processor(text=lowerCamelCase_ )
        _snake_case : Any = tokenizer(lowerCamelCase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __UpperCAmelCase ( self : Tuple ):
        '''Text + images yields pixel_values/labels; calling with nothing raises.'''
        _snake_case : Dict = self.get_image_processor()
        _snake_case : str = self.get_tokenizer()
        _snake_case : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _snake_case : Optional[Any] = 'test'
        _snake_case : Union[str, Any] = self.prepare_image_inputs()
        _snake_case : List[Any] = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
        self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
        # test if it raises when no input is passed
        with pytest.raises(lowerCamelCase_ ):
            processor()
    def __UpperCAmelCase ( self : Optional[int] ):
        '''`char_decode` matches the tokenizer's batch_decode with spaces stripped.'''
        _snake_case : str = self.get_image_processor()
        _snake_case : str = self.get_tokenizer()
        _snake_case : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _snake_case : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        _snake_case : int = processor.char_decode(lowerCamelCase_ )
        _snake_case : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )
        _snake_case : List[Any] = [seq.replace(' ' , '' ) for seq in decoded_tok]
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : int ):
        '''With no text, the processor's output keys equal its model_input_names.'''
        _snake_case : int = self.get_image_processor()
        _snake_case : List[Any] = self.get_tokenizer()
        _snake_case : Dict = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _snake_case : List[str] = None
        _snake_case : Tuple = self.prepare_image_inputs()
        _snake_case : Dict = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def __UpperCAmelCase ( self : str ):
        '''`batch_decode` over char/bpe/wp logits returns the expected result keys.'''
        _snake_case : List[Any] = self.get_image_processor()
        _snake_case : Dict = self.get_tokenizer()
        _snake_case : Dict = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _snake_case : Dict = torch.randn(1 , 27 , 38 )
        _snake_case : Any = torch.randn(1 , 27 , 5_02_57 )
        _snake_case : Any = torch.randn(1 , 27 , 3_05_22 )
        _snake_case : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
| 652 |
import itertools
import math
def is_prime(number):
    """Return True iff `number` is prime (trial division over 6k +/- 1 candidates).

    Renamed from the scrambled `A__`: `prime_generator` below calls `is_prime`.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth=1_00_01):
    """Return the `nth` prime number (Project Euler problem 7; default: the 10001st).

    Renamed from the scrambled `A__` to match the call in the entry point
    below; the islice stop bound is `nth` so exactly one value is taken.
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 652 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer implementation.
lowercase_ : List[str] = logging.get_logger(__name__)
class lowercase ( a_ ):
    """ByT5-style byte-level tokenizer: raw UTF-8 bytes are the vocabulary,
    preceded by pad/eos/unk specials and followed by `extra_ids` sentinel
    tokens.

    NOTE(review): this block appears machine-scrambled — every parameter is
    named `lowerCamelCase_` (several signatures therefore repeat a parameter
    name, which is a SyntaxError), and locals assigned to `_snake_case` are
    read under their intended names (`extra_tokens`, `n`, ...). Code is kept
    byte-identical; comments describe apparent intent — confirm against the
    upstream ByT5Tokenizer before relying on them.
    """
    # Names of the tensors this tokenizer produces.
    _UpperCamelCase : Any = ["input_ids", "attention_mask"]
    def __init__( self : str , lowerCamelCase_ : Optional[int]="</s>" , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : Tuple="<pad>" , lowerCamelCase_ : List[str]=1_25 , lowerCamelCase_ : List[Any]=None , **lowerCamelCase_ : int , ):
        '''Build the vocab: 3 specials + 256 byte tokens + `extra_ids` sentinels.'''
        if extra_ids > 0 and additional_special_tokens is None:
            # Default sentinel tokens <extra_id_0> ... <extra_id_{extra_ids-1}>.
            _snake_case : str = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase_ )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _snake_case : Any = len(set(filter(lambda lowerCamelCase_ : bool('extra_id' in str(lowerCamelCase_ ) ) , lowerCamelCase_ ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        # Wrap plain-string specials as AddedToken so stripping behavior is explicit.
        _snake_case : Tuple = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
        _snake_case : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
        _snake_case : str = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
        super().__init__(
            eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , extra_ids=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
        _snake_case : Tuple = extra_ids
        _snake_case : Any = 2**8 # utf is 8 bits
        # define special tokens dict
        _snake_case : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        _snake_case : Optional[int] = len(self.special_tokens_encoder )
        _snake_case : Dict = len(lowerCamelCase_ )
        for i, token in enumerate(lowerCamelCase_ ):
            # Place each additional special token at the end of the id space.
            _snake_case : Any = self.vocab_size + i - n
        # Reverse mapping: id -> special token string.
        _snake_case : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def __UpperCAmelCase ( self : List[Any] ):
        '''Total vocab size: 256 bytes + special tokens + extra_ids sentinels.'''
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
        '''Mask marking special tokens (the appended EOS positions) with 1.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowerCamelCase_ )) + [1]
        return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : List[int] ):
        '''Append the EOS id unless the sequence already ends with it (then warn).'''
        if len(lowerCamelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
        '''Token-type ids are all zeros for this model (single segment type).'''
        _snake_case : str = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
        '''Build model inputs: each sequence gets an EOS, pairs are concatenated.'''
        _snake_case : List[Any] = self._add_eos_if_not_present(lowerCamelCase_ )
        if token_ids_a is None:
            return token_ids_a
        else:
            _snake_case : Optional[int] = self._add_eos_if_not_present(lowerCamelCase_ )
            return token_ids_a + token_ids_a
    def __UpperCAmelCase ( self : int , lowerCamelCase_ : str ):
        '''Tokenize text into one single-character token per UTF-8 byte.'''
        _snake_case : Any = [chr(lowerCamelCase_ ) for i in text.encode('utf-8' )]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict ):
        '''Map token -> id: specials first, then added tokens, then raw byte + offset.'''
        if token in self.special_tokens_encoder:
            _snake_case : Any = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            _snake_case : str = self.added_tokens_encoder[token]
        elif len(lowerCamelCase_ ) != 1:
            # Multi-char strings cannot be a single byte token -> unknown.
            _snake_case : List[str] = self.unk_token_id
        else:
            _snake_case : List[str] = ord(lowerCamelCase_ ) + self._num_special_tokens
        return token_id
    def __UpperCAmelCase ( self : str , lowerCamelCase_ : Optional[int] ):
        '''Map id -> token: specials by table, otherwise the byte value minus offset.'''
        if index in self.special_tokens_decoder:
            _snake_case : int = self.special_tokens_decoder[index]
        else:
            _snake_case : int = chr(index - self._num_special_tokens )
        return token
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : List[Any] ):
        '''Join byte tokens back into a string; specials are re-encoded verbatim.'''
        _snake_case : int = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                _snake_case : Optional[int] = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                _snake_case : int = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                _snake_case : str = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                _snake_case : str = token.encode('utf-8' )
            else:
                # Regular token: a single character standing for one byte.
                _snake_case : List[Any] = bytes([ord(lowerCamelCase_ )] )
            bstring += tok_string
        _snake_case : str = bstring.decode('utf-8' , errors='ignore' )
        return string
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
        '''No vocab files to save — the byte vocabulary is implicit.'''
        return ()
| 652 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Checkpoints and fixtures used by the dataset tests below. The scrambled
# original bound every constant to the same throwaway name `lowercase_` while
# the test class reads them as BERT_BASE_CASED, PEGASUS_XSUM, ARTICLES,
# SUMMARIES, T5_TINY, BART_TINY, MBART_TINY and MARIAN_TINY.
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : str = '\n'.join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase )
def make_test_data_dir(tmp_dir):
    """Populate `tmp_dir` with {train,val,test}.{source,target} fixture files.

    Renamed from the scrambled `A__` to match the calls in the test class
    below; the fixture contents come from the module-level ARTICLES/SUMMARIES
    constants rather than the directory argument itself.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f'''{split}.source'''), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f'''{split}.target'''), SUMMARIES)
    return tmp_dir
class lowercase ( a_ ):
    """Seq2seq dataset tests: SeqaSeqDataset/LegacySeqaSeqDataset truncation,
    data-dir packing, dynamic/sortish/distributed samplers and dataset kwargs.

    NOTE(review): this block appears machine-scrambled — locals are assigned
    to `_snake_case` while later lines read the intended names (`tokenizer`,
    `dataloader`, `batch_sampler`, ...), and helper methods are all named
    `__UpperCAmelCase` although callers use `self._get_dataset()`. Code is
    kept byte-identical; comments describe apparent intent — confirm against
    the upstream test suite before relying on them.
    """
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
        '''Batches are trimmed to max_source/target lengths; mBART gets language codes placed correctly.'''
        _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Dict = 4
        _snake_case : Any = 8
        assert max_len_target > max_src_len # Will be truncated
        assert max_len_source > max_src_len # Will be truncated
        _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
        _snake_case : int = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
        _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
        '''LegacySeqaSeqDataset truncates sources to max_source_length and targets to the trunc length.'''
        _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Union[str, Any] = 4
        _snake_case : Optional[int] = LegacySeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
        _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target # Truncated
            assert max_len_target > trunc_target # Truncated
            break # No need to test every batch
    def __UpperCAmelCase ( self : Dict ):
        '''pack_data_dir merges short examples into fewer, longer lines (same file names).'''
        _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
        _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
        _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
        _snake_case : Dict = {x.name for x in save_dir.iterdir()}
        _snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1
        assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def __UpperCAmelCase ( self : List[str] ):
        '''Dynamic batch sampler produces variable batch sizes bounded by max_tokens.'''
        if not FAIRSEQ_AVAILABLE:
            return
        _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
        _snake_case : List[str] = 64
        _snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
        _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
        assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
        assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
        _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : List[Any] = []
        _snake_case : List[Any] = []
        for batch in data_loader:
            _snake_case : Any = batch['input_ids'].shape
            _snake_case : str = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _snake_case : int = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(lowerCamelCase_ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(lowerCamelCase_ )
        assert num_src_per_batch[0] == max(lowerCamelCase_ )
        if failures:
            raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Sortish sampler reduces padding versus the default ordering.'''
        _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
        _snake_case : Optional[Any] = 2
        _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
        _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.pad_token_id
        def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
            # Count pad tokens across all batches of a dataloader for tensor `k`.
            return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
        assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
        assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
        '''Helper: build (dataset, max_tokens, tokenizer), using real WMT data when USE_REAL_DATA is set.'''
        if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
            _snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
            _snake_case : List[Any] = max_len * 2 * 64
            if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
                save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        else:
            _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
            _snake_case : List[Any] = max_len * 4
            save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : str = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
        return ds, max_tokens, tokenizer
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Distributed sortish sampler shards without overlap between ranks.'''
        _snake_case , _snake_case , _snake_case : Any = self._get_dataset()
        _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
        _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
        assert idsa.intersection(lowerCamelCase_ ) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
        '''dataset_kwargs carries src/tgt_lang for mBART and add_prefix_space only for BART.'''
        _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
        if tok_name == MBART_TINY:
            _snake_case : int = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            _snake_case : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            _snake_case : Tuple = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            _snake_case : List[Any] = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 1 |
# Notebook bootstrap snippet (in Italian) injected at the top of generated doc
# notebooks. The string content is runtime data and must stay exactly as-is.
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
# First notebook cell: runs the installation snippet above.
# NOTE(review): `INSTALL_CONTENT` is undefined here — presumably the first
# constant above was meant to carry that name; confirm against upstream.
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions used when rendering doc templates.
lowercase_ : str = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 652 |
from __future__ import annotations
def A__(value, weight, capacity):
    """Fractional knapsack: greedily take items in decreasing value/weight order.

    Returns (max_value, fractions), where fractions[i] is the fraction of item
    i placed in the knapsack. Fixes the scrambled original, which repeated one
    parameter name (a SyntaxError), read `index`/`ratio`/`max_value`/
    `fractions` without binding them, sorted with an unbound lambda variable
    and passed an undefined value to `reverse=`.

    Args:
        value: item values.
        weight: item weights (same length as `value`).
        capacity: total knapsack capacity.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Highest value-per-weight first: the classic greedy order.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the fraction filling the remaining capacity, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
# Script entry point: run the module's doctests (none are defined above, so
# this only verifies the module imports cleanly).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 652 | 1 |
from math import factorial
def binomial_distribution(successes, trials, prob):
    """P(X = successes) for X ~ Binomial(trials, prob).

    Renamed from the scrambled `A__` to match the call in the demo block
    below; the three identically-named parameters were a SyntaxError and the
    locals `probability`/`coefficient` were read without being bound.

    Raises:
        ValueError: if successes > trials, if either count is negative or not
            an int, or if prob is outside the open interval (0, 1).
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
# Demo entry point: run doctests, then print P(2 successes in 4 trials) at p=0.75.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.75))
| 652 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import map: submodule name -> public symbols it defines.
_import_structure = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the modeling symbols as well.
    _import_structure['modeling_canine'] = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 652 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: largest sum of a contiguous subarray of ``arr``.

    Args:
        arr: the input sequence; an empty sequence yields 0.
        allow_empty_subarrays: when True, the empty subarray (sum 0) is a
            valid answer, so the result is never negative.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current
        # element (restart at 0 when empty subarrays are allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f'''{max_subarray_sum(nums) = }''')
| 652 |
import math


def is_prime(number: int) -> bool:
    """Return True when ``number`` is prime (trial division over 6k +/- 1)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Raises:
        TypeError: if ``nth`` cannot be cast to int.
        ValueError: if ``nth`` is not positive.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import map: submodule name -> public symbols (filled per backend).
_import_structure = {
    'configuration_roberta_prelayernorm': [
        'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'RobertaPreLayerNormConfig',
        'RobertaPreLayerNormOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roberta_prelayernorm'] = [
        'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaPreLayerNormForCausalLM',
        'RobertaPreLayerNormForMaskedLM',
        'RobertaPreLayerNormForMultipleChoice',
        'RobertaPreLayerNormForQuestionAnswering',
        'RobertaPreLayerNormForSequenceClassification',
        'RobertaPreLayerNormForTokenClassification',
        'RobertaPreLayerNormModel',
        'RobertaPreLayerNormPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_roberta_prelayernorm'] = [
        'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaPreLayerNormForCausalLM',
        'TFRobertaPreLayerNormForMaskedLM',
        'TFRobertaPreLayerNormForMultipleChoice',
        'TFRobertaPreLayerNormForQuestionAnswering',
        'TFRobertaPreLayerNormForSequenceClassification',
        'TFRobertaPreLayerNormForTokenClassification',
        'TFRobertaPreLayerNormMainLayer',
        'TFRobertaPreLayerNormModel',
        'TFRobertaPreLayerNormPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_roberta_prelayernorm'] = [
        'FlaxRobertaPreLayerNormForCausalLM',
        'FlaxRobertaPreLayerNormForMaskedLM',
        'FlaxRobertaPreLayerNormForMultipleChoice',
        'FlaxRobertaPreLayerNormForQuestionAnswering',
        'FlaxRobertaPreLayerNormForSequenceClassification',
        'FlaxRobertaPreLayerNormForTokenClassification',
        'FlaxRobertaPreLayerNormModel',
        'FlaxRobertaPreLayerNormPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 652 |
import torch
from transformers import AutoModel
class lowercase(torch.nn.Module):
    """Few-shot NER scorer: encodes a query and its support sentences with a
    shared BERT encoder and scores start/end positions for entity spans."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum embeddings along dim 2, keeping that dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """Temperature-scaled cosine-similarity attention."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score start/end token positions of the query against supports.

        Returns:
            ``(p_starts, p_ends)`` — per-query softmax distributions over
            query token positions.
        """
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        # These keys are metadata, not encoder inputs — drop before encoding.
        del W_supports['sizes']
        del W_supports['start_token_id']
        del W_supports['end_token_id']

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            # Support embeddings at entity start/end marker positions.
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase_ : str = logging.get_logger(__name__)

# Pretrained-config archive map (empty: no hosted configs listed here).
# NOTE(review): upstream names this MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP;
# as written it rebinds the same name as the logger above — verify the target.
lowercase_ : Optional[int] = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase(PretrainedConfig):
    """Configuration class for MegatronBERT models.

    Stores the hyper-parameters that define the model architecture; defaults
    yield a megatron-bert-uncased-345m-style configuration.
    """

    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 652 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens for spectrogram diffusion."""

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        # Positional table is fixed, not learned.
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode token ids; returns (hidden states, the unchanged mask)."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 652 | 1 |
from __future__ import annotations
import queue
class TreeNode:
    """Binary-tree node: an integer payload plus left/right child links."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree in level order from user input.

    Entering ``n`` (or nothing) at any prompt stops input and returns the
    root of the tree built so far.
    """
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: satisfies the declared non-optional return type
def pre_order(node: TreeNode) -> None:
    """Print the tree rooted at ``node`` in root-left-right order."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print the tree rooted at ``node`` in left-root-right order."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print the tree rooted at ``node`` in left-right-root order."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')
def level_order(node: TreeNode) -> None:
    """Print the tree in breadth-first (level) order using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Breadth-first print with a newline after each completed level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        # Drain the current level, collecting the next level's nodes.
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # descend to the leftmost node first
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stacka_reversed = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stacka_reversed.append(n)
    while stacka_reversed:  # pop up from stack2 will be the post order
        print(stacka_reversed.pop().data, end=',')
def A__( __lowerCAmelCase = "" , __lowerCAmelCase=50 , __lowerCAmelCase="*" ):
if not s:
return "\n" + width * char
_snake_case , _snake_case : Dict = divmod(width - len(__lowerCAmelCase ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
lowercase_ : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 652 |
def A__(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. ``"AB"``) to its number.

    >>> A__("A")
    1
    >>> A__("AB")
    28
    >>> A__("ZZ")
    702
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        # 'A' is 1 ... 'Z' is 26; each position is a base-26 digit.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 652 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV1-specific fields."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV1 configs/inputs and runs shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act='relu6',
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The model scales its final width by the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model + pipeline tests for MobileNetV1.

    Attention/pruning/resize tests are disabled: the architecture has no
    attention layers or token embeddings.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV1 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV1 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV1 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the published mobilenet_v1 checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 652 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """TER (Translation Edit Rate) metric — thin wrapper around sacrebleu's TER."""

    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Return the MetricInfo for this metric; requires sacrebleu >= 1.4.12.'''
        if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
                } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
                'https://github.com/jhclark/tercom',
            ] , )

    def __UpperCAmelCase ( self : Dict , predictions : Optional[int] , references : str , normalized : bool = False , ignore_punct : bool = False , asian_support : bool = False , case_sensitive : bool = False , ):
        '''Compute corpus-level TER for `predictions` against `references`.

        The original (obfuscated) signature reused one parameter name for every
        argument, which is a SyntaxError in Python; distinct names are restored
        here from how the body uses them.
        '''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        # Transpose: sacrebleu expects one list per reference position, not one per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=asian_support , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowercase_ : Tuple = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowercase_ : List[Any] = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
lowercase_ : Dict = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def A__( preds , labels ):
    """Simple accuracy: fraction of positions where `preds` equals `labels`.

    Inputs are expected to be numpy arrays (elementwise `==` then `.mean()`).
    The original signature reused one parameter name twice, a SyntaxError;
    distinct names are restored here.
    """
    return float((preds == labels).mean() )
def A__( preds , labels ):
    """Return accuracy and F1 for two numpy label arrays.

    The original signature duplicated one parameter name (SyntaxError) and
    called `simple_accuracy`, which is not defined under that name in this
    file (the module-level defs are all named `A__`); the accuracy
    computation is inlined instead.
    """
    acc = float((preds == labels).mean() )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def A__( preds , labels ):
    """Return Pearson and Spearman correlation between two numeric sequences.

    The original signature reused one parameter name twice (SyntaxError);
    distinct names restored from the body's scipy calls.
    """
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """GLUE metric: dispatches on ``self.config_name`` to the subset-specific score."""

    def __UpperCAmelCase ( self : Dict ):
        '''Validate the configuration name and return the MetricInfo (numpy format).'''
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )

    def __UpperCAmelCase ( self : Dict , predictions : Tuple , references : str ):
        '''Compute the metric for the selected GLUE subset.

        The original signature reused one parameter name twice (SyntaxError);
        names restored.  The original call targets (`pearson_and_spearman`,
        `acc_and_fa`, `simple_accuracy`) are not defined under those names in
        this file (the module-level defs are all named `A__`), so the
        computations are inlined here.
        '''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return {
                "pearson": float(pearsonr(predictions , references )[0] ),
                "spearmanr": float(spearmanr(predictions , references )[0] ),
            }
        elif self.config_name in ["mrpc", "qqp"]:
            acc = float((predictions == references).mean() )
            return {
                "accuracy": acc,
                "f1": float(fa_score(y_true=references , y_pred=predictions ) ),
            }
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": float((predictions == references).mean() )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def A__( qs , ks ):
    """Return True if the rule path `qs` (regex fragments) matches a contiguous
    window of the key path `ks`; each fragment is suffix-anchored with `$`.

    The original signature reused one parameter name twice (SyntaxError) while
    the body already referenced `qs`; names restored accordingly.
    """
    qts = tuple(re.compile(x + '$' ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def A__( __lowerCAmelCase ):
    """Return a ``replace(key, val)`` closure that maps `key` to the replacement
    of the first matching rule in `__lowerCAmelCase` (a list of (rule, replacement)
    pairs), or `val` when no rule matches.
    """
    rules = __lowerCAmelCase

    def _rule_matches(qs , ks ):
        # Suffix-anchored regex match of the rule path against any window of the
        # key path.  Inlined because the sibling matcher in this file is not
        # defined under the name (`_match`) the original body called.
        qts = tuple(re.compile(x + '$' ) for x in qs )
        for i in range(len(ks ) - len(qs ) + 1 ):
            matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
            if matches and all(matches ):
                return True
        return False

    def replace(key , val ):
        # The original inner signature reused one parameter name twice (SyntaxError).
        for rule, replacement in rules:
            if _rule_matches(rule , key ):
                return replacement
        return val

    return replace
def A__( ):
    """Static partition rules mapping GPT-2 parameter paths to PartitionSpecs
    for model-parallel ('mp') sharding.

    The original body used an undefined module-level name where the unsharded
    axis of each spec belongs; restored to `None` (replicate that axis).
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , None )),
        (("transformer", "wte", "embedding"), P('mp' , None )),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def A__( __lowerCAmelCase ):
    """Map every leaf of the nested parameter dict `__lowerCAmelCase` to a
    partition spec and return a frozen dict; asserts that every leaf matched
    some rule so incomplete rule sets fail loudly.
    """
    # NOTE(review): `_get_partition_rules` / `_replacement_rules` are not defined
    # under these names in this file (the sibling defs are all named `A__`) —
    # confirm the intended wiring.
    _unmatched = object()  # local sentinel: only ever compared within this function
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    # The original body bound the initial dict to a throwaway name, iterated the
    # undefined name `initd`, and passed the wrong arguments to `replace`.
    initd = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
| 652 | 1 |
# Example digraphs for exercising the strongly-connected-components routine
# below: adjacency lists keyed by vertex id (0..n-1).
# NOTE(review): both literals are bound to the same name `lowercase_`, so the
# second assignment overwrites the first — presumably these were meant to be
# two distinct test graphs; confirm against the intended callers.
lowercase_ : int = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowercase_ : Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def A__( graph , vert , visited ):
    """Return the DFS post-order (finish order) of the component reachable from
    `vert`, marking nodes in `visited` in place.

    The original signature reused one parameter name three times (SyntaxError),
    never marked `visited[vert]`, and recursed through an undefined module name;
    recursion is via a nested helper instead.
    """
    def _dfs(node ):
        visited[node] = True
        order = []
        for neighbour in graph[node]:
            if not visited[neighbour]:
                order += _dfs(neighbour )
        order.append(node )
        return order

    return _dfs(vert )
def A__( reversed_graph , vert , visited ):
    """Collect all vertices reachable from `vert` in `reversed_graph` (one SCC
    when called in reverse finish order), marking `visited` in place.

    The original signature reused one parameter name three times (SyntaxError),
    never marked `visited[vert]`, and recursed through an undefined module name;
    recursion is via a nested helper instead.
    """
    def _collect(node ):
        visited[node] = True
        component = [node]
        for neighbour in reversed_graph[node]:
            if not visited[neighbour]:
                component += _collect(neighbour )
        return component

    return _collect(vert )
def A__( __lowerCAmelCase ):
    """Kosaraju's algorithm: return the list of strongly connected components of
    the digraph `__lowerCAmelCase` (adjacency lists keyed by vertex 0..n-1).

    The original body referenced the undefined name `graph`, appended the wrong
    value when reversing edges, and called helpers (`topology_sort`,
    `find_components`) that are not defined under those names in this file;
    both passes are inlined as nested helpers.
    """
    graph = __lowerCAmelCase

    def _postorder(node , visited , out ):
        # First pass: DFS finish order on the original graph.
        visited[node] = True
        for neighbour in graph[node]:
            if not visited[neighbour]:
                _postorder(neighbour , visited , out )
        out.append(node )

    def _collect(node , visited ):
        # Second pass: gather one SCC on the reversed graph.
        visited[node] = True
        component = [node]
        for neighbour in reversed_graph[node]:
            if not visited[neighbour]:
                component += _collect(neighbour , visited )
        return component

    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )

    order: list[int] = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            _postorder(i , visited , order )

    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            components_list.append(_collect(vert , visited ) )
    return components_list
| 652 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( model , dirpath ):
    """Save `model` into `dirpath` via `model.save_pretrained`, first removing a
    stale config.json / pytorch_model.bin if the directory already exists
    (creating the directory otherwise).

    The original signature reused one parameter name twice, a SyntaxError;
    distinct names restored here.
    """
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def A__( p , unlogit=False ):
    """Shannon entropy of `p` along its last dimension; with `unlogit=True` the
    input is squared first.

    The original signature reused one parameter name (SyntaxError) and dropped
    the indexed assignment that defines 0·log(0) = 0, which would otherwise
    propagate NaNs.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0  # convention: 0 * log(0) = 0
    return -plogp.sum(dim=-1 )
def A__( __lowerCAmelCase ):
    """Log a 2D tensor (layers x heads) as tab-separated rows via `logger`."""
    tensor = __lowerCAmelCase  # local alias; the original body referenced the undefined name `tensor`
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    """Compute per-head attention entropy and head-importance scores.

    Iterates over `eval_dataloader`, accumulating attention entropy and the
    gradient of the LM loss w.r.t. the head mask, then normalizes and logs both
    matrices.  Returns ``(attn_entropy, head_importance, total_loss)``.

    The original signature reused one parameter name for every argument
    (SyntaxError); names restored from the body.
    NOTE(review): `entropy` and `print_ad_tensor` are not defined under these
    names in this file (the module-level defs are all named `A__`) — confirm wiring.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )

    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )

    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    # Rank heads: entry k is the rank of head k (0 = most important).  The
    # original had dropped the indexed assignment of the sorted positions.
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def A__( args , model , eval_dataloader ):
    """Iteratively mask the least-important heads until the (1/loss) score drops
    below ``args.masking_threshold`` times the original score; returns the
    final head mask and saves it as head_mask.npy.

    The original signature reused one parameter name three times (SyntaxError)
    and dropped two indexed tensor assignments (masked-head exclusion and
    mask zeroing).
    NOTE(review): `compute_heads_importance` / `print_ad_tensor` are not defined
    under these names in this file — confirm wiring.
    """
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )

    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]

        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )

    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )

    return head_mask
def A__( args , model , eval_dataloader , head_mask ):
    """Actually prune the heads zeroed in `head_mask`, compare score and speed
    before/after, and save the pruned model.

    The original signature reused one parameter name four times (SyntaxError),
    called ``isinstance`` with the same value twice, and dropped the
    ``heads_to_prune[k] = [v]`` normalization assignment.
    NOTE(review): `compute_heads_importance` / `save_model` are not defined
    under these names in this file — confirm wiring.
    """
    # Timing before pruning (masking only)
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }

    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            # A single pruned head comes back as a bare int; normalize to a list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_00 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
    save_model(model , args.output_dir )
def A__( ):
    """Entry point: parse CLI arguments, load a GPT-2 LM, compute attention-head
    importance on the given dataset, and optionally mask and prune heads.

    The original body used an undefined placeholder name wherever an argparse
    ``type``/``default``/``required`` value, a boolean flag, or an attribute
    assignment target belonged; values are restored here (str/int/float types,
    ``required=True``, ``args.device`` / ``args.n_gpu`` / ``model`` bindings,
    ``np.int64``).
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_28 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )

    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )

    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )

    # Compute head entropy and importance score
    # NOTE(review): `compute_heads_importance` / `mask_heads` / `prune_heads`
    # are not defined under these names in this file — confirm wiring.
    compute_heads_importance(args , model , eval_dataloader )

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )


if __name__ == "__main__":
    # NOTE(review): the original guard called the undefined name `main()`; the
    # entry point in this file is named `A__`.
    A__()
| 652 | 1 |
import math
def A__( __lowerCAmelCase = 1_00 ):
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first `n` natural numbers.

    The original body referenced the undefined names `n`, `sum_of_squares` and
    `square_of_sum` (values were bound to a throwaway name); restored here.
    """
    n = __lowerCAmelCase
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # NOTE(review): the original guard printed the undefined name `solution()`;
    # the function in this file is named `A__`.
    print(F'''{A__() = }''')
| 652 |
def A__( __lowerCAmelCase ):
    """Return the largest number obtainable by deleting exactly one digit of the
    input's absolute value (e.g. 152 -> 52, -20 -> 2).

    Raises TypeError for non-integer input.  The original body compared the
    value against itself in ``isinstance``, built candidates from the raw int
    instead of its digit string, and popped/joined the wrong values.
    """
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_str = str(abs(__lowerCAmelCase ) )
        # One candidate per digit position, each about to lose that digit.
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
| 652 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ : Union[str, Any] = 16
lowercase_ : Dict = 32
def A__( accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 ):
    """Build (train, validation, test) dataloaders for one cross-validation fold.

    `train_idxs` / `valid_idxs` select rows of ``dataset['train']`` for this
    fold; ``dataset['validation']`` serves as the held-out test split.

    The original signature reused one parameter name five times (SyntaxError)
    and the body referenced the undefined local `datasets`; names restored.
    All three loaders use `batch_size` as in the original body (the upstream
    example uses a separate eval batch size — confirm intent).
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    fold_datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs ),
            'validation': dataset['train'].select(valid_idxs ),
            'test': dataset['validation'],
        } )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = fold_datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets['test'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# New Code #
_snake_case : Union[str, Any] = []
# Download the dataset
_snake_case : List[str] = load_dataset('glue' , 'mrpc' )
# Create our splits
_snake_case : str = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_snake_case : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Any = config['lr']
_snake_case : List[str] = int(config['num_epochs'] )
_snake_case : Union[str, Any] = int(config['seed'] )
_snake_case : List[str] = int(config['batch_size'] )
_snake_case : int = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_snake_case : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_snake_case : List[str] = batch_size // MAX_GPU_BATCH_SIZE
_snake_case : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(__lowerCAmelCase )
# New Code #
# Create our folds:
_snake_case : str = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
_snake_case : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = get_fold_dataloaders(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : Any = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : int = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : Union[str, Any] = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
_snake_case : Dict = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : List[Any] = model(**__lowerCAmelCase )
_snake_case : str = outputs.loss
_snake_case : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : Dict = model(**__lowerCAmelCase )
_snake_case : Any = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
_snake_case : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
_snake_case : List[Any] = []
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : Dict = model(**__lowerCAmelCase )
_snake_case : str = outputs.logits
_snake_case , _snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__lowerCAmelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_snake_case : Tuple = torch.cat(__lowerCAmelCase , dim=0 )
_snake_case : List[str] = torch.stack(__lowerCAmelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_snake_case : List[Any] = metric.compute(predictions=__lowerCAmelCase , references=__lowerCAmelCase )
accelerator.print('Average test metrics from all folds:' , __lowerCAmelCase )
def A__( ):
_snake_case : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=__lowerCAmelCase , default=3 , help='The number of splits to perform across the dataset' )
_snake_case : Any = parser.parse_args()
_snake_case : List[str] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 652 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ : Tuple = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase ( a_ , unittest.TestCase ):
    """Test suite for the slow (sentencepiece) and fast (tokenizers) DeBERTa-v2
    tokenizers, driven by the SentencePiece fixture loaded at module level.

    NOTE(review): identifiers in this file appear machine-mangled. Every test
    method shares the name ``__UpperCAmelCase`` (later definitions shadow
    earlier ones, and unittest cannot discover them by name), locals are bound
    to ``_snake_case`` but read back under other names (e.g. ``tokenizer``,
    ``vocab_keys``), and ``lowerCamelCase_`` is referenced in methods that
    declare no such parameter — it presumably stands for the module-level
    fixture path or the original literal arguments (booleans/strings). The
    code is documented as written; restoring the intended identifiers should
    be confirmed against the upstream transformers test file.
    """
    # NOTE(review): these four assignments all bind the SAME class attribute,
    # so only the last survives; presumably they were four distinct mixin
    # settings (slow class, fast class, and two boolean feature flags).
    _UpperCamelCase : Optional[Any] = DebertaVaTokenizer
    _UpperCamelCase : List[str] = DebertaVaTokenizerFast
    _UpperCamelCase : Dict = True
    _UpperCamelCase : Tuple = True
    def __UpperCAmelCase ( self : str ):
        """Build a tokenizer from the SentencePiece fixture and save it to the temp dir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): `lowerCamelCase_` is undefined here; presumably the
        # module-level fixture path — TODO confirm. `tokenizer` on the next
        # line is likewise unbound (the result was assigned to `_snake_case`).
        _snake_case : str = DebertaVaTokenizer(lowerCamelCase_ , unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : str ):
        """Return an (input_text, output_text) pair used by round-trip tests."""
        _snake_case : Optional[Any] = 'this is a test'
        _snake_case : str = 'this is a test'
        return input_text, output_text
    def __UpperCAmelCase ( self : Tuple ):
        """'<pad>' and id 0 must convert to each other."""
        _snake_case : int = '<pad>'
        _snake_case : Optional[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
    def __UpperCAmelCase ( self : List[str] ):
        """First, second and last vocab entries and total vocab length are as expected."""
        _snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '[PAD]' )
        self.assertEqual(len(lowerCamelCase_ ) , 3_00_01 )
    def __UpperCAmelCase ( self : str ):
        """The tokenizer reports a vocab size of 30000."""
        self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
    def __UpperCAmelCase ( self : List[Any] ):
        """Slow and fast tokenizers agree on a mixed-case, whitespace-heavy input."""
        # fmt: off
        _snake_case : List[str] = ' \tHeLLo!how  \n Are yoU?  '
        _snake_case : Dict = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        _snake_case : Dict = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[Any] = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
        _snake_case : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def __UpperCAmelCase ( self : int ):
        """Intentionally skipped: known slow/fast inconsistency upstream."""
        pass
    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def __UpperCAmelCase ( self : Any ):
        """Intentionally skipped: known slow/fast inconsistency upstream."""
        pass
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Slow/fast agreement on accented input (split_by_punct variant — flag values were mangled)."""
        # fmt: off
        _snake_case : Dict = 'I was born in 92000, and this is falsé.'
        _snake_case : Dict = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        _snake_case : Dict = DebertaVaTokenizer(lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[str] = DebertaVaTokenizerFast(lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : Optional[int] ):
        """Slow/fast agreement: lower-cased pieces expected (do_lower_case + split_by_punct variant)."""
        # fmt: off
        _snake_case : int = 'I was born in 92000, and this is falsé.'
        _snake_case : Dict = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        _snake_case : Any = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Any = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : Optional[int] ):
        """Slow/fast agreement: lower-cased pieces with punctuation kept attached."""
        # fmt: off
        _snake_case : Dict = 'I was born in 92000, and this is falsé.'
        _snake_case : str = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        _snake_case : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[Any] = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : List[str] ):
        """Slow/fast agreement: cased pieces, punctuation split off."""
        # fmt: off
        _snake_case : str = 'I was born in 92000, and this is falsé.'
        _snake_case : Optional[int] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        _snake_case : int = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Optional[int] = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : List[Any] ):
        """Slow/fast agreement on whitespace-heavy mixed-case input (no lower-casing)."""
        # fmt: off
        _snake_case : str = ' \tHeLLo!how  \n Are yoU?  '
        _snake_case : Any = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        _snake_case : Any = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Dict = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
        _snake_case : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : int ):
        """Slow and fast tokenizers produce identical tokens and ids, with and without special tokens."""
        _snake_case : List[str] = self.get_tokenizer()
        _snake_case : int = self.get_rust_tokenizer()
        _snake_case : int = 'I was born in 92000, and this is falsé.'
        _snake_case : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        _snake_case : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Optional[int] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        _snake_case : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Tuple = self.get_rust_tokenizer()
        _snake_case : Optional[Any] = tokenizer.encode(lowerCamelCase_ )
        _snake_case : List[Any] = rust_tokenizer.encode(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : Tuple ):
        """encode / tokenize / convert_ids_to_tokens yield the expected pieces for both tokenizers."""
        _snake_case : Optional[int] = 'This is a test'
        _snake_case : Optional[Any] = [13, 1, 43_98, 25, 21, 12_89]
        _snake_case : Union[str, Any] = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        _snake_case : str = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        # NOTE(review): `keep_accents=lowerCamelCase_` — the original flag
        # value was mangled; presumably True — TODO confirm.
        _snake_case : List[Any] = DebertaVaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
        _snake_case : List[Any] = DebertaVaTokenizerFast(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
        _snake_case : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[Any] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Any = rust_tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Any = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        # fmt: off
        _snake_case : Optional[Any] = 'I was born in 92000, and this is falsé.'
        _snake_case : Tuple = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
        _snake_case : List[str] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        _snake_case : Dict = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        _snake_case : Any = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[str] = tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Optional[Any] = rust_tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[str] = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def __UpperCAmelCase ( self : List[Any] ):
        """build_inputs_with_special_tokens wraps one or two sequences with CLS/SEP correctly."""
        _snake_case : Tuple = DebertaVaTokenizer(lowerCamelCase_ )
        _snake_case : Dict = tokenizer.encode('sequence builders' )
        _snake_case : Union[str, Any] = tokenizer.encode('multi-sequence build' )
        _snake_case : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
        _snake_case : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCamelCase_ )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCamelCase_ , )
    @slow
    def __UpperCAmelCase ( self : Tuple ):
        """Integration check: encodings must match pinned microsoft/deberta-v2-xlarge outputs."""
        # fmt: off
        _snake_case : List[Any] = {'input_ids': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 
        'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 652 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def __UpperCAmelCase ( self : Optional[Any] ):
        """Release memory after each test (standard teardown for diffusers pipeline tests).

        NOTE(review): the call to ``super().tearDown()`` indicates this was
        originally named ``tearDown``; under the mangled name unittest will
        not invoke it automatically — confirm against the upstream file.
        """
        super().tearDown()
        gc.collect()
        # Return cached CUDA allocations to the driver so later tests start clean.
        torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
    def __UpperCAmelCase ( self : int ):
        """Run the tiny DDIM-scheduled StableDiffusion pipeline end to end on CPU
        and compare an output image slice against pinned reference values.

        NOTE(review): locals are bound to ``_snake_case`` but read back under
        other names (``sd_pipe``, ``prompt``, ``output``, ...), and the many
        ``lowerCamelCase_`` references stand in for the original literal
        arguments (booleans / the components built above) — the exact intended
        values cannot be recovered from this file alone.
        """
        _snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _snake_case : int = self.dummy_cond_unet
        _snake_case : str = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Union[str, Any] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : str = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[str] = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Union[str, Any] = output.images
        # Re-seed so the tuple-return path starts from identical noise.
        _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Tuple = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Pinned reference slice; 1e-2 tolerance absorbs platform numeric drift.
        _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : List[str] ):
        """Run the tiny PNDM-scheduled StableDiffusion pipeline end to end on CPU
        and compare an output image slice against pinned reference values.

        NOTE(review): same identifier mangling as the DDIM test above —
        ``_snake_case`` bindings read back under other names, and
        ``lowerCamelCase_`` standing in for the original literal arguments.
        """
        _snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _snake_case : List[str] = self.dummy_cond_unet
        _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : int = self.dummy_vae
        _snake_case : List[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Any = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : str = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Optional[Any] = output.images
        # Re-seed so the tuple-return path starts from identical noise.
        _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Pinned reference slice; 1e-2 tolerance absorbs platform numeric drift.
        _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
    """Check that a pipeline loaded with ``safety_checker=None`` works, and that it can be
    saved and reloaded without error even though one sub-model is missing.

    NOTE(review): ``lowerCamelCase_``, ``pipe``, ``image`` and ``tmpdirname`` are not bound
    in this scope — mechanical rename artifact; restore original names (e.g. the
    ``from_pretrained`` keyword is presumably ``safety_checker=None``).
    """
    _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
        'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
    assert isinstance(pipe.scheduler , lowerCamelCase_ )
    assert pipe.safety_checker is None
    _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
    assert image is not None
    # check that there's no error when saving a pipeline with one of the models being None
    with tempfile.TemporaryDirectory() as tmpdirname:
        pipe.save_pretrained(lowerCamelCase_ )
        _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
    # sanity check that the pipeline still works
    assert pipe.safety_checker is None
    _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
    assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
    """Smoke-test the dummy pipeline with all sub-models cast to fp16 on GPU; only the
    output shape is asserted.

    NOTE(review): ``lowerCamelCase_``, ``unet``, ``vae``, ``bert``, ``sd_pipe``, ``prompt``
    and ``image`` are unbound here — mechanical rename artifact; restore original names.
    """
    _snake_case : Union[str, Any] = self.dummy_cond_unet
    _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
    _snake_case : Any = self.dummy_vae
    _snake_case : Optional[Any] = self.dummy_text_encoder
    _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
    # put models in fp16
    _snake_case : str = unet.half()
    _snake_case : Union[str, Any] = vae.half()
    _snake_case : Dict = bert.half()
    # make sure here that pndm scheduler skips prk
    _snake_case : List[str] = StableDiffusionPipeline(
        unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
    _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
    sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
    _snake_case : Tuple = 'A painting of a squirrel eating a burger'
    _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
    assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Nightly GPU integration tests for Stable Diffusion v1-5 with safe latent diffusion
    (``sld_*``) guidance parameters: each test renders with and without safety guidance
    and compares a 3x3 corner slice against reference values.

    NOTE(review): all four methods share the name ``__UpperCAmelCase``, so only the last
    definition survives on the class, and bodies reference unbound names
    (``lowerCamelCase_``, ``sd_pipe``, ``prompt``, ``seed``, ``output``, ``image``) —
    mechanical rename artifact; restore the original test names and identifiers.
    """

    def __UpperCAmelCase ( self : Tuple ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : List[str] ):
        """Seeded portrait prompt, with sld_guidance_scale 0 vs a strong configuration."""
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Optional[int] = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        _snake_case : List[str] = 40_03_66_03_46
        _snake_case : int = 7
        # without safety guidance (sld_guidance_scale = 0)
        _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Union[str, Any] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : str = output.images
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # without safety guidance (strong configuration)
        _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
        _snake_case : int = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : Tuple = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        """Second seeded prompt, again comparing zero vs strong safety guidance slices."""
        _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
        _snake_case : Optional[Any] = 27_34_97_17_55
        _snake_case : Union[str, Any] = 7
        _snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Any = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        _snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : Tuple ):
        """Default pipeline (safety checker enabled): the unguided render is expected to be
        fully blacked out (all zeros), while the strongly guided render matches a slice."""
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[Any] = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        _snake_case : Union[str, Any] = 10_44_35_52_34
        _snake_case : Dict = 12
        _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Optional[int] = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Optional[int] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowercase_ : Tuple = get_logger(__name__)
lowercase_ : Union[str, Any] = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class lowercase :
    """Abstract base class for logits processors applied during Flax generation.

    Subclasses must override ``__call__``; calling the base class raises.
    """

    # Fix: the decorator argument was ``lowerCamelCase_``, which is undefined at module
    # scope — the shared input docstring constant defined above is ``lowercase_``.
    @add_start_docstrings(lowercase_ )
    def __call__( self : Any , input_ids : jnp.ndarray , scores : jnp.ndarray ) -> jnp.ndarray:
        """Abstract hook: transform ``scores`` given ``input_ids``; must be overridden."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase :
    """Abstract base class for logits warpers (sampling-time score transforms) applied
    during Flax generation. Subclasses must override ``__call__``; calling the base
    class raises.
    """

    # Fix: decorator argument was the undefined ``lowerCamelCase_``; the docstring
    # constant defined at module level is ``lowercase_``.
    @add_start_docstrings(lowercase_ )
    def __call__( self : Dict , input_ids : jnp.ndarray , scores : jnp.ndarray ) -> jnp.ndarray:
        """Abstract hook: transform ``scores`` given ``input_ids``; must be overridden."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( a_ ):
    """List of logits processors applied sequentially to the scores.

    Fix: the original ``__call__`` signature repeated one parameter name (a
    SyntaxError) and the body referenced names that were never bound; restored the
    conventional ``input_ids``/``scores``/``cur_len`` interface.
    """

    @add_start_docstrings(lowercase_ )
    def __call__( self : Tuple , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int , **kwargs : Dict ) -> jnp.ndarray:
        """Apply each processor in order; processors taking more than the three standard
        arguments must have all their extra arguments supplied via ``kwargs``."""
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                # Ensure every extra parameter beyond (input_ids, scores) was provided.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        f'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class lowercase ( a_ ):
    """Logits warper that rescales the distribution by a fixed temperature.

    Fix: the original validated ``isinstance(x, x)`` and referenced the undefined
    name ``temperature``, and the ``__call__`` signature repeated one parameter name
    (a SyntaxError); restored the intended behavior.
    """

    def __init__( self : int , temperature : float ):
        """Store a strictly positive float ``temperature``; reject anything else."""
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature

    def __call__( self : Union[str, Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        """Divide the logits by the temperature (sharpens for t<1, flattens for t>1)."""
        scores = scores / self.temperature
        return scores
class lowercase ( a_ ):
    """Top-p (nucleus) logits warper: keeps the smallest token set whose cumulative
    probability exceeds ``top_p`` and masks everything else with ``filter_value``.

    Fix: restored real parameter/attribute names — the original signature repeated a
    single parameter name (a SyntaxError) and the body assigned to throwaway locals.
    """

    def __init__( self : Dict , top_p : float , filter_value : float = -float('Inf' ) , min_tokens_to_keep : int = 1 ):
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__( self : Dict , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        # Sort all logits descending so the cumulative softmax can be thresholded.
        topk_scores , topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        # Undo the sort so scores line up with the original vocabulary order.
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class lowercase ( a_ ):
    """Top-k logits warper: keeps only the ``top_k`` highest-probability tokens and
    sets the rest to ``filter_value``.

    Fix: restored real parameter/attribute names — the original signature repeated a
    single parameter name (a SyntaxError) and the body assigned to throwaway locals.
    """

    def __init__( self : Any , top_k : int , filter_value : float = -float('Inf' ) , min_tokens_to_keep : int = 1 ):
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
        # Never keep fewer than min_tokens_to_keep tokens.
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value

    def __call__( self : str , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        batch_size , vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores , topk_indices = lax.top_k(scores , topk )
        # Per-row offsets so the flattened indices address the right batch row.
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
class lowercase ( a_ ):
    """Forces ``bos_token_id`` to be the first generated token.

    Fix: restored real parameter names — the original ``__call__`` signature repeated
    a single parameter name (a SyntaxError) and ``__init__`` assigned to a throwaway
    local instead of ``self``.
    """

    def __init__( self : Union[str, Any] , bos_token_id : int ):
        self.bos_token_id = bos_token_id

    def __call__( self : str , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        # apply_penalty is 1 only at the very first generation step (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class lowercase ( a_ ):
    """Forces ``eos_token_id`` to be the last token when ``max_length`` is reached.

    Fix: restored real parameter names — the original ``__call__`` signature repeated
    a single parameter name (a SyntaxError) and ``__init__`` assigned to throwaway
    locals instead of ``self``.
    """

    def __init__( self : Dict , max_length : int , eos_token_id : int ):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__( self : Union[str, Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        # apply_penalty is 1 only at the final step before max_length is hit.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class lowercase ( a_ ):
    """Suppresses EOS until the sequence reaches ``min_length`` tokens.

    Fix: restored real parameter names — the original validated ``isinstance(x, x)``,
    referenced undefined names, and repeated a parameter name in ``__call__`` (a
    SyntaxError).
    """

    def __init__( self : str , min_length : int , eos_token_id : int ):
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__( self : Union[str, Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        # apply_penalty is 1 while cur_len < min_length, 0 afterwards.
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , scores )
        return scores
class lowercase ( a_ ):
    """Suppresses a list of tokens while generation is still at ``begin_index``.

    Fix: restored real parameter names — the original ``__call__`` repeated a
    parameter name (a SyntaxError) and ``__init__`` assigned to throwaway locals.
    """

    def __init__( self : Optional[Any] , begin_suppress_tokens : List[Any] , begin_index : Optional[Any] ):
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index

    def __call__( self : Tuple , input_ids : int , scores : int , cur_len : int ):
        # apply_penalty is 1 only when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , scores )
        return scores
class lowercase ( a_ ):
    """Unconditionally suppresses a fixed list of token ids at every step.

    Fix: restored real parameter names — the original ``__call__`` repeated a
    parameter name (a SyntaxError) and ``__init__`` assigned to a throwaway local.
    """

    def __init__( self : Optional[Any] , suppress_tokens : list ):
        self.suppress_tokens = list(suppress_tokens )

    def __call__( self : int , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
        return scores
class lowercase ( a_ ):
    """Forces specific tokens at specific generation indices (e.g. Whisper task tokens).

    Fix: restored real names — the original signature repeated a parameter name (a
    SyntaxError), assigned to throwaway locals instead of ``self``, and used the
    mangled dtype ``jnp.intaa`` (restored to ``jnp.int32``).
    """

    def __init__( self : int , force_token_map : Optional[int] ):
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )

    def __call__( self : int , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        def _force_token(generation_idx : Any ):
            # Build a score matrix that is -inf everywhere except the forced token column.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float('inf' )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores

        # Only force when cur_len is inside the map and the stored token is >= 0.
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
class lowercase ( a_ ):
    """Whisper timestamp logits processor: keeps timestamp tokens paired, caps the
    initial timestamp, and forces a timestamp when its total probability dominates.

    Fix: restored real names — the original signatures repeated parameter names
    (SyntaxErrors), assigned to throwaway locals instead of ``self``, and referenced
    undefined identifiers inside the vmapped helpers.
    """

    def __init__( self : Any , generate_config : List[Any] , model_config : Optional[int] , decoder_input_length : Optional[Any] ):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , 'max_initial_timestamp_index' ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__( self : List[str] , input_ids : List[Any] , scores : Dict , cur_len : Dict ):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )

        def handle_pairs(input_ids_k : List[Any] , scores_k : List[Any] ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , scores_k , )

        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , scores , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )

        def handle_cumulative_probs(logprobs_k : Union[str, Any] , scores_k : List[Any] ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , scores_k , )

        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
| 652 |
import functools
def A__( days , costs ):
    """Return the minimum cost to cover every travel day with 1-, 7- and 30-day passes.

    Fix: the original signature declared the same parameter name twice (a SyntaxError)
    while the body referenced ``days``/``costs``; restored those names.

    :param days: list of travel days, each an int in 1..365
    :param costs: [1-day pass cost, 7-day pass cost, 30-day pass cost]
    :raises ValueError: on malformed input or out-of-range days
    """
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        # Past the last possible day: nothing left to pay for.
        if index > 3_65:
            return 0
        # No travel on this day: advance for free.
        if index not in days_set:
            return dynamic_programming(index + 1 )
        # Cheapest of: 1-day, 7-day or 30-day pass bought today.
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 652 | 1 |
from math import factorial
def A__( num = 1_00 ):
    """Return the sum of the decimal digits of ``num!`` (Project Euler 20).

    Fix: the original mapped the *input number* over the digit characters
    (``map(__lowerCAmelCase, ...)``), which raises TypeError; the digits must be
    converted with ``int``.
    """
    return sum(map(int , str(factorial(num ) ) ) )
if __name__ == "__main__":
    # Fix: the original called the undefined name ``solution``; the digit-sum
    # function in this module is ``A__``.
    print(A__(int(input('''Enter the Number: ''').strip())))
| 652 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
    """Deprecated alias for the Segformer image processor, kept for backward
    compatibility; emits a FutureWarning and forwards construction to the parent.

    Fix: the original ``__init__`` declared ``*x`` and ``**x`` with the same name (a
    SyntaxError) and passed the varargs as the warning category; restored
    ``FutureWarning`` as the category.
    """

    def __init__( self : int , *args : str , **kwargs : Tuple ):
        """Warn about the deprecation, then defer to the parent constructor."""
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 652 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase ( a_ ):
    """Configuration for Perceiver models: latent/attention geometry plus the
    modality-specific settings (text, image, optical-flow, multimodal autoencoding).

    Fix: the original ``__init__`` declared one parameter name dozens of times (a
    SyntaxError) and assigned every value to a throwaway local instead of ``self``;
    restored the intended parameter names and self-assignments.
    """

    _UpperCamelCase : Dict = "perceiver"
    # Restored identifier expected by the base config machinery (was mangled above).
    model_type = "perceiver"

    def __init__( self : Union[str, Any] , num_latents=2_56 , d_latents=12_80 , d_model=7_68 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=2_62 , max_position_embeddings=20_48 , image_size=56 , train_size=[3_68, 4_96] , num_frames=16 , audio_samples_per_frame=19_20 , samples_per_patch=16 , output_shape=[1, 16, 2_24, 2_24] , **kwargs : Any , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class lowercase ( a_ ):
    """ONNX export configuration for Perceiver models.

    Fix: the original collapsed three distinct members (the ``inputs`` property,
    ``atol_for_validation`` and ``generate_dummy_inputs``) into one shadowed name,
    repeated parameter names in the dummy-input signature (a SyntaxError), and
    checked ``isinstance(x, x)``; restored the intended members and checks.
    """

    @property
    def inputs( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
        """Map each ONNX input name to its dynamic axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def atol_for_validation( self : Tuple ) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1e-4

    def generate_dummy_inputs( self : Any , preprocessor , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ):
        """Build dummy inputs for export from either a tokenizer or an image processor."""
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            # Perceiver expects the tokens under the generic "inputs" key.
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 652 |
from math import factorial
def A__( successes , trials , prob ):
    """Return the binomial probability of exactly ``successes`` out of ``trials``
    Bernoulli trials with per-trial success probability ``prob``.

    Fix: the original declared the same parameter name three times (a SyntaxError)
    while the body referenced ``successes``/``trials``/``prob``; restored those names.

    :raises ValueError: if successes > trials, either count is negative or
        non-integer, or prob is outside (0, 1)
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # Fix: the original called the undefined name ``binomial_distribution``; the
    # distribution function in this module is ``A__``.
    print(A__(2, 4, 0.75))
| 652 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def A__( __lowerCAmelCase=None ):
    """Build the argument parser for the ``accelerate env`` subcommand.

    NOTE(review): the body references ``subparsers`` and ``parser`` but the parameter
    is named ``__lowerCAmelCase`` and the results are bound to ``_snake_case`` — a
    mechanical rename broke this function. Additionally, all three top-level functions
    in this module share the name ``A__`` and shadow one another, so
    ``set_defaults(func=...)`` cannot point at the command handler; restore the
    original distinct names (parser builder, command handler, main) to repair it.
    """
    if subparsers is not None:
        _snake_case : List[str] = subparsers.add_parser('env' )
    else:
        _snake_case : Optional[int] = argparse.ArgumentParser('Accelerate env command' )
    # NOTE(review): ``default`` here should presumably be None, not the subparsers object.
    parser.add_argument(
        '--config_file' , default=__lowerCAmelCase , help='The config file to use for the default values in the launching script.' )
    if subparsers is not None:
        parser.set_defaults(func=__lowerCAmelCase )
    return parser
def A__( __lowerCAmelCase ):
    """Gather environment information for bug reports, print it, and return it as a dict.

    ``__lowerCAmelCase`` is the parsed argparse namespace (carries ``config_file``).
    """
    # Bug fix: the original body read `args.config_file` while the parameter was
    # named `__lowerCAmelCase`, raising NameError at call time; intermediate values
    # were also bound to a throwaway `_snake_case` name and then read back under
    # their original names.
    args = __lowerCAmelCase
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
        'PyTorch XPU available': str(pt_xpu_available ),
        'PyTorch NPU available': str(pt_npu_available ),
        'System RAM': F'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        # NOTE(review): upstream stores this under info['GPU type']; the original
        # assigned it to a throwaway local — restoring the upstream key, confirm.
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n' )
    print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
    accelerate_config_str = (
        '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    # NOTE(review): upstream key is '`Accelerate` configs' — confirm.
    info['`Accelerate` configs'] = accelerate_config
    return info
def A__( ):
    """CLI entry point: build the `accelerate env` parser, parse argv, run the command.

    Returns 0 so the caller can hand the value to ``SystemExit``.
    """
    # Bug fix: the original bound the parser to a throwaway `_snake_case` name but
    # then called `parser.parse_args()` on an unbound `parser`, raising NameError.
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
    # NOTE(review): confirm `main` resolves here — the entry-point function above
    # is defined under the name `A__` in this module.
    raise SystemExit(main())
| 652 |
# Notebook bootstrap snippet (Italian text, kept verbatim): installs Transformers
# and datasets, with a commented-out source-install alternative.
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
# Cells injected at the top of converted doc notebooks.
# NOTE(review): `INSTALL_CONTENT` is not defined in this module — the snippet above
# is bound to `lowercase_`; confirm the intended name.
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder substitutions applied to documentation templates.
lowercase_ : str = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 652 | 1 |
# Lazy-import table for the M2M-100 model family: heavy submodules are only
# imported on first attribute access via `_LazyModule`.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Names importable without optional dependencies (configuration + tokenizer).
lowercase_ : Union[str, Any] = {
    '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
    '''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
# Torch-only modelling names are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : int = [
        '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''M2M100ForConditionalGeneration''',
        '''M2M100Model''',
        '''M2M100PreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
    from .tokenization_mam_aaa import MaMaaaTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mam_aaa import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaMaaaForConditionalGeneration,
            MaMaaaModel,
            MaMaaaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this module (the tables
    # above are bound to `lowercase_`), so this line raises NameError — confirm.
    lowercase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
# Lazy-import table for RoCBert: configuration/tokenizer always available,
# torch modelling code registered only when torch is installed.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream registers the fast tokenizer ('RoCBertTokenizerFast')
    # in this branch; here it is a no-op — confirm.
    pass
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Tuple = [
        '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoCBertForCausalLM''',
        '''RoCBertForMaskedLM''',
        '''RoCBertForMultipleChoice''',
        '''RoCBertForPreTraining''',
        '''RoCBertForQuestionAnswering''',
        '''RoCBertForSequenceClassification''',
        '''RoCBertForTokenClassification''',
        '''RoCBertLayer''',
        '''RoCBertModel''',
        '''RoCBertPreTrainedModel''',
        '''load_tf_weights_in_roc_bert''',
    ]
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): raising when tokenizers IS available looks inverted —
        # upstream imports RoCBertTokenizerFast in this branch; confirm.
        raise OptionalDependencyNotAvailable()
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this module (the tables
    # above are bound to `lowercase_`), so this line raises NameError — confirm.
    lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
from __future__ import annotations
def A__( __lowerCAmelCase ):
    """Encode a lowercase string into its A1Z26 form (a=1 ... z=26).

    Bug fix: the original called `ord` on the whole argument and iterated an
    unbound name `plain`, so it always raised at call time.
    """
    return [ord(elem ) - 96 for elem in __lowerCAmelCase]
def A__( __lowerCAmelCase ):
    """Decode a sequence of A1Z26 values (1..26) back into a lowercase string.

    Bug fix: the original iterated an unbound name `encoded` instead of its
    parameter, raising NameError at call time.
    """
    return "".join(chr(elem + 96 ) for elem in __lowerCAmelCase )
def A__( ):
    """Interactive driver: read a line, print its A1Z26 encoding and the round-trip decoding."""
    # NOTE(review): `encode`/`decode` are not defined in this module (both cipher
    # functions above are named `A__`), and `__lowerCAmelCase` below is unbound at
    # runtime (the result is assigned to `_snake_case`) — confirm intended names.
    _snake_case : int = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , __lowerCAmelCase )
    print('Decoded:' , decode(__lowerCAmelCase ) )
if __name__ == "__main__":
    # NOTE(review): `main` is likewise undefined here (the driver above is `A__`).
    main()
| 652 |
# Lazy-import table for the MEGA model: configuration always available, torch
# modelling code registered only when torch is installed.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)
lowercase_ : Optional[int] = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Any = [
        '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegaForCausalLM''',
        '''MegaForMaskedLM''',
        '''MegaForMultipleChoice''',
        '''MegaForQuestionAnswering''',
        '''MegaForSequenceClassification''',
        '''MegaForTokenClassification''',
        '''MegaModel''',
        '''MegaPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this module (the tables
    # above are bound to `lowercase_`), so this line raises NameError — confirm.
    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# Debug/visual-inspection toggle; not referenced anywhere in this chunk.
lowercase_ : bool = False
class lowercase ( unittest.TestCase ):
    """Checks that DDPM and DDIM training produce matching per-step losses under fixed seeds."""
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Dict=32 ):
        '''Build a small seeded UNet2D model plus an SGD optimizer (sample size defaults to 32).'''
        set_seed(0 )
        # NOTE(review): both results are bound to throwaway `_snake_case` names, so
        # `model`/`optimizer` below are unbound at runtime — confirm intended names.
        _snake_case : List[Any] = UNetaDModel(sample_size=lowerCamelCase_ , in_channels=3 , out_channels=3 )
        _snake_case : Dict = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
    @slow
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Train one tiny model with a DDPM scheduler and another with DDIM on identical batches; losses must match.'''
        _snake_case : Any = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        _snake_case : Optional[int] = DDPMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowerCamelCase_ , )
        _snake_case : Dict = DDIMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowerCamelCase_ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        _snake_case : Tuple = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCamelCase_ ) for _ in range(4 )]
        _snake_case : Any = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase_ ) for _ in range(4 )]
        _snake_case : int = [torch.randint(0 , 10_00 , (4,) ).long().to(lowerCamelCase_ ) for _ in range(4 )]
        # train with a DDPM scheduler
        _snake_case , _snake_case : Tuple = self.get_model_optimizer(resolution=32 )
        model.train().to(lowerCamelCase_ )
        for i in range(4 ):
            optimizer.zero_grad()
            _snake_case : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _snake_case : Optional[int] = model(lowerCamelCase_ , timesteps[i] ).sample
            _snake_case : str = torch.nn.functional.mse_loss(lowerCamelCase_ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        _snake_case , _snake_case : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(lowerCamelCase_ )
        for i in range(4 ):
            optimizer.zero_grad()
            _snake_case : List[str] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _snake_case : str = model(lowerCamelCase_ , timesteps[i] ).sample
            _snake_case : Any = torch.nn.functional.mse_loss(lowerCamelCase_ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # NOTE(review): these compare `lowerCamelCase_` with itself; upstream compares
        # the DDPM vs DDIM noisy images and predictions — confirm intended operands.
        self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-5 ) )
| 652 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# NOTE(review): upstream binds this to `pytestmark` so the whole module is marked
# as integration tests; bound to `lowercase_` the mark has no effect — confirm.
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( path , tmp_path ):
    """`inspect_dataset` copies the dataset script into *tmp_path* without byte-compiling it.

    Bug fix: the original declared two parameters both named `__lowerCAmelCase`
    (a SyntaxError); pytest also requires the names to match the `parametrize`
    id and the `tmp_path` fixture.
    """
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( path , tmp_path ):
    """`inspect_metric` copies the metric script into *tmp_path* without byte-compiling it.

    Bug fix: the original's duplicate `__lowerCAmelCase` parameters were a
    SyntaxError; names now match the `parametrize` id and the `tmp_path` fixture.
    """
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def A__( path , config_name , expected_splits ):
    """`get_dataset_config_info` reports the config name and its split names.

    Bug fix: duplicate `__lowerCAmelCase` parameters (SyntaxError); parameter
    names must match the `parametrize` ids for pytest to inject them.
    """
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def A__( path , config_name , expected_exception ):
    """Missing `config_name` for a multi-config dataset raises the expected exception.

    Bug fix: duplicate `__lowerCAmelCase` parameters (SyntaxError); names now
    match the `parametrize` ids.
    """
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def A__( path , expected ):
    """`get_dataset_config_names` lists the expected config for each dataset path.

    Bug fix: duplicate `__lowerCAmelCase` parameters (SyntaxError); names now
    match the `parametrize` ids.
    """
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def A__( path , expected_configs , expected_splits_in_first_config ):
    """`get_dataset_infos` keys match the expected configs; the first config exposes its splits.

    Bug fix: duplicate `__lowerCAmelCase` parameters (SyntaxError); names now
    match the `parametrize` ids.
    """
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def A__( path , expected_config , expected_splits ):
    """A specific config from `get_dataset_infos` carries the expected split names.

    Bug fix: duplicate `__lowerCAmelCase` parameters (SyntaxError); names now
    match the `parametrize` ids.
    """
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def A__( path , config_name , expected_exception ):
    """Missing `config_name` for a multi-config dataset makes `get_dataset_split_names` raise.

    Bug fix: duplicate `__lowerCAmelCase` parameters (SyntaxError); names now
    match the `parametrize` ids.
    """
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 | 1 |
# Doomsday anchor day-of-month for each month, leap vs. common years
# (Jan/Feb differ; the rest are identical).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}
# Preserve the original module-level binding for backward compatibility.
lowercase_ = WEEK_DAY_NAMES


def A__( year , month , day ):
    """Return the weekday name for a Gregorian date via Conway's Doomsday algorithm.

    Bug fixes: the lookup tables above were previously bound to `lowercase_`
    so the names used here never resolved, and the leap-year test used
    ``(year % 400) == 0`` — which wrongly treated years divisible by 400
    (e.g. 2000) as common years; they are leap years, so the test is ``!= 0``.
    """
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


get_week_day = A__  # descriptive alias


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 652 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow BERT checkpoint into a PyTorch `BertForPreTraining` state dict.

    Bug fix: the original declared three parameters all named `__lowerCAmelCase`,
    which is a SyntaxError; they are renamed to match the CLI flags below.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )


# Canonical name used by the CLI entry point below.
convert_tf_checkpoint_to_pytorch = A__
# CLI: python script.py --tf_checkpoint_path ... --bert_config_file ... --pytorch_dump_path ...
if __name__ == "__main__":
    lowercase_ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    # NOTE(review): the parser above is bound to `lowercase_` while `parser` is read
    # here, and confirm `convert_tf_checkpoint_to_pytorch` resolves (the converter
    # above is defined as `A__`).
    lowercase_ : List[str] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 1 |
from string import ascii_uppercase

# Maps '10'..'35' -> 'A'..'Z' for digit values above 9.
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
# Preserve the original module-level binding for backward compatibility.
lowercase_ = ALPHABET_VALUES


def A__( num , base ):
    """Convert a non-negative integer to its string representation in *base* (2..36).

    Bug fixes: the original declared two parameters both named `__lowerCAmelCase`
    (a SyntaxError) and its body read unbound names (`num`, `base`,
    `ALPHABET_VALUES`); validation behavior is preserved.
    """
    if isinstance(num , float ):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(base , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if base in (0, 1):
        raise ValueError('base must be >= 2' )
    if base > 36:
        raise ValueError('base must be <= 36' )
    digits = '0123456789' + ascii_uppercase
    if num == 0:
        return '0'
    out = []
    while num > 0:
        num , rem = divmod(num , base )
        out.append(digits[rem] )
    return ''.join(reversed(out ) )


# Canonical name, used by the self-test below.
decimal_to_any = A__

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Exhaustive round-trip self-test over every base.
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
| 652 |
import itertools
import math
def A__( __lowerCAmelCase ):
    """Return True if the argument is prime, using 6k±1 trial division.

    Bug fix: the original body tested an unbound name `number` instead of its
    parameter, raising NameError at call time.
    """
    number = __lowerCAmelCase
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def A__( ):
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    Bug fix: the original incremented an unbound `num` and called a missing
    `is_prime`; the primality check is inlined so the generator is self-contained.
    """
    def _is_prime(candidate ):
        # 6k±1 trial division, same scheme as the module's primality test.
        if 1 < candidate < 4:
            return True
        if candidate < 2 or candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(candidate ) + 1 ) , 6 ):
            if candidate % i == 0 or candidate % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num ):
            yield num
        num += 1
def A__( __lowerCAmelCase = 1_00_01 ):
    """Return the nth prime number (Project Euler 7; default n = 10001).

    Bug fixes: the original read an unbound `nth` and sliced
    ``islice(..., nth - 1, __lowerCAmelCase)`` instead of ``(nth - 1, nth)``;
    the implementation is now self-contained.
    """
    nth = __lowerCAmelCase
    def _is_prime(candidate ):
        # 6k±1 trial division.
        if 1 < candidate < 4:
            return True
        if candidate < 2 or candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(candidate ) + 1 ) , 6 ):
            if candidate % i == 0 or candidate % (i + 2) == 0:
                return False
        return True

    found = 0
    candidate = 1
    while found < nth:
        candidate += 1
        if _is_prime(candidate ):
            found += 1
    return candidate


# Canonical name, used by the entry point below.
solution = A__

if __name__ == "__main__":
    print(F'''{solution() = }''')
| 652 | 1 |
# Lazy-import table for Reformer: configuration always available; slow/fast
# tokenizers and torch modelling code gated on their optional dependencies.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)
lowercase_ : Optional[Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : int = ['''ReformerTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Union[str, Any] = ['''ReformerTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Tuple = [
        '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ReformerAttention''',
        '''ReformerForMaskedLM''',
        '''ReformerForQuestionAnswering''',
        '''ReformerForSequenceClassification''',
        '''ReformerLayer''',
        '''ReformerModel''',
        '''ReformerModelWithLMHead''',
        '''ReformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this module (the tables
    # above are bound to `lowercase_`), so this line raises NameError — confirm.
    lowercase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Checkpoint identifiers used as tiny test fixtures.
lowercase_ : Dict = '''bert-base-cased'''
lowercase_ : Any = '''google/pegasus-xsum'''
# Small source/target pairs written into the fixture data directories.
lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
# NOTE(review): the tests below reference names such as BERT_BASE_CASED, PEGASUS_XSUM,
# ARTICLES, SUMMARIES, T5_TINY, BART_TINY, MBART_TINY and MARIAN_TINY; every constant
# here is bound to `lowercase_` instead — confirm the intended bindings.
lowercase_ : Any = '''patrickvonplaten/t5-tiny-random'''
lowercase_ : List[Any] = '''sshleifer/bart-tiny-random'''
lowercase_ : Dict = '''sshleifer/tiny-mbart'''
lowercase_ : str = '''sshleifer/tiny-marian-en-de'''
def A__( path , articles ):
    """Write *articles* to *path*, joined by newlines.

    Bug fixes: the original declared two parameters both named `__lowerCAmelCase`
    (a SyntaxError) and leaked the file handle it opened.
    """
    content = '\n'.join(articles )
    with Path(path ).open('w' ) as f:
        f.write(content )
def A__( tmp_dir ):
    """Create {train,val,test}.{source,target} fixture files under *tmp_dir* and return it.

    Bug fix: the body read an unbound `tmp_dir` while the parameter was named
    `__lowerCAmelCase`; callers invoke this with `tmp_dir=` as a keyword, so the
    parameter is renamed to match.
    """
    for split in ["train", "val", "test"]:
        # NOTE(review): ARTICLES / SUMMARIES are the fixture lists defined above, but
        # in this file those constants are bound to `lowercase_` — confirm they resolve.
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
# NOTE(review): `a_` is presumably the TestCasePlus base imported above — confirm.
class lowercase ( a_ ):
    """Integration tests for the seq2seq dataset utilities: truncation, packing, and samplers."""
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
        '''Batches from SeqaSeqDataset are truncated to the configured max source/target lengths.'''
        _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Dict = 4
        _snake_case : Any = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
        _snake_case : int = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
        _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
        '''LegacySeqaSeqDataset also truncates sources and targets to its max lengths.'''
        _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Union[str, Any] = 4
        _snake_case : Optional[int] = LegacySeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
        _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def __UpperCAmelCase ( self : Dict ):
        '''pack_data_dir merges examples up to the token budget without losing text or files.'''
        _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
        _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
        _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
        _snake_case : Dict = {x.name for x in save_dir.iterdir()}
        _snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1
        assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def __UpperCAmelCase ( self : List[str] ):
        '''The dynamic batch sampler keeps batches within the token budget and drops nothing.'''
        if not FAIRSEQ_AVAILABLE:
            return
        _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
        _snake_case : List[str] = 64
        _snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
        _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
        assert len(set(lowerCamelCase_ ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ )  # no dropped or added examples
        _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : List[Any] = []
        _snake_case : List[Any] = []
        for batch in data_loader:
            _snake_case : Any = batch['input_ids'].shape
            _snake_case : str = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _snake_case : int = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(lowerCamelCase_ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(lowerCamelCase_ )
        assert num_src_per_batch[0] == max(lowerCamelCase_ )
        if failures:
            raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''The sortish sampler yields fewer pad tokens than an unsorted DataLoader.'''
        _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
        _snake_case : Optional[Any] = 2
        _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
        _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.pad_token_id
        def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
            # Count pad tokens per batch for the given field.
            return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
        assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
        assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
        '''Build a SeqaSeqDataset over the WMT en-ro fixtures (real data when USE_REAL_DATA is set).'''
        if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
            _snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
            _snake_case : List[Any] = max_len * 2 * 64
            if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
                save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        else:
            _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
            _snake_case : List[Any] = max_len * 4
            save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : str = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
        return ds, max_tokens, tokenizer
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Two distributed sortish sampler shards must cover disjoint example ids.'''
        _snake_case , _snake_case , _snake_case : Any = self._get_dataset()
        _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
        _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
        assert idsa.intersection(lowerCamelCase_ ) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
        '''dataset_kwargs carries src/tgt langs only for mBART and add_prefix_space only for BART.'''
        _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
        if tok_name == MBART_TINY:
            _snake_case : int = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            _snake_case : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            _snake_case : Tuple = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            _snake_case : List[Any] = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    """Convert an OpenAI GPT TensorFlow checkpoint into a PyTorch model dump.

    Args:
        openai_checkpoint_folder_path: path to the TensorFlow checkpoint folder.
        openai_config_file: optional JSON config path; '' means default config.
        pytorch_dump_folder_path: output directory for weights and config.
    """
    # Construct model (default config when no config file is supplied).
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from the TF checkpoint into the PyTorch model.
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/config/output paths and convert.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--openai_checkpoint_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the TensorFlow checkpoint path.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--openai_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    args = parser.parse_args()
    # The conversion function above is named ``A__`` in this file.
    A__(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 652 |
from __future__ import annotations
def A__( value , weight , capacity ):
    """Greedy fractional knapsack.

    Args:
        value: item values.
        weight: item weights (parallel to ``value``).
        capacity: knapsack capacity.

    Returns:
        ``(max_value, fractions)``: the best achievable total value and, per
        item, the fraction taken (0, 1, or a partial fraction for the last item).
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Consider items in decreasing value-per-weight order.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Only a fraction of this item fits; take it and stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 652 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the (deprecated) MMBT model: names are resolved on
# first attribute access so importing the package does not require torch.
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration.
    pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mmbt import MMBTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for CANINE: heavy torch modules are only imported on
# first attribute access, and only when torch is installed.
_import_structure = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_canine'''] = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]
if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__( __lowerCAmelCase ):
    """Load an original MobileViTv2 YAML config into a flat ``argparse.Namespace``.

    Nested YAML mappings are flattened into '.'-joined attribute names
    (e.g. ``model.classification.name``).
    """
    print('Loading config file...')

    def flatten_yaml_as_dict(d, parent_key='', sep='.'):
        # Recursively flatten nested mappings into {'a.b.c': value} pairs.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(__lowerCAmelCase, 'r') as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(__lowerCAmelCase, str(exc)))
    return config
def A__( task_name , orig_cfg_file ):
    """Build a MobileViTVaConfig for ``task_name`` from the original YAML config.

    ``task_name`` selects label count, image size and label-map file (ImageNet
    variants, ADE20K, Pascal VOC); ``orig_cfg_file`` is the original repo's
    YAML config path.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_'):
        config.num_labels = 10_00
        if int(task_name.strip().split('_')[-1]) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_'):
        config.num_labels = 2_10_00
        if int(task_name.strip().split('_')[-1]) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_'):
        config.num_labels = 1_51
        config.image_size = 5_12
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_'):
        config.num_labels = 21
        config.image_size = 5_12
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name', -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier', 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer', -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name', 'swish')
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride', 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels', 5_12)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout', 0.1)
    # id2label
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def A__( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def A__( state_dict , base_model=False ):
    """Map original MobileViTv2 checkpoint keys onto HF transformers names.

    Returns a list of ``(old_key, new_key)`` pairs. ``base_model=True`` omits
    the leading ``mobilevitv2.`` prefix on the new keys.
    """
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        # Strip the original top-level "encoder." prefix.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.', '.')
        if ".conv." in k:
            k_new = k_new.replace('.conv.', '.convolution.')
        if ".norm." in k:
            k_new = k_new.replace('.norm.', '.normalization.')
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.', F'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if F'''layer_{i}.''' in k:
                k_new = k_new.replace(F'''layer_{i}.''', F'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.', '.expand_1x1.')
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.', '.reduce_1x1.')
        for i in [3, 4, 5]:
            if F'''layer_{i}.0.''' in k:
                k_new = k_new.replace(F'''layer_{i}.0.''', F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if F'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(F'''layer_{i}.1.local_rep.0.''', F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if F'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(F'''layer_{i}.1.local_rep.1.''', F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')
        for i in [3, 4, 5]:
            # Number of transformer sub-layers differs per stage.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j}.''', F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
                if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j+1}.''', F'''{model_prefix}encoder.layer.{i-1}.layernorm.''')
            if F'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(F'''layer_{i}.1.conv_proj.''', F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.', 'layernorm_before.')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.', 'attention.')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.')
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.', 'classifier.')
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.', 'segmentation_head.')
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.', '.')
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.', '.')
        rename_keys.append((k, k_new))
    return rename_keys
def A__( state_dict ):
    """Drop auxiliary-head weights (``seg_head.aux_head.*``) from ``state_dict`` in place."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.'):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        # Use a default so a missing key never raises.
        state_dict.pop(k, None)
def A__( ):
    """Download the standard COCO cats test image used to sanity-check the model."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A__( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    """Convert an original MobileViTv2 checkpoint to an HF model and save it.

    Args:
        task_name: which pretraining/finetuning task the checkpoint is for.
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original YAML config.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # load huggingface model
    if task_name.startswith('ade20k_') or task_name.startswith('voc_'):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith('imagenet'):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256') and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse task/paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
              imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
              ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    # The conversion entry point above is named ``A__`` in this file.
    A__(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 652 |
import math
def A__( __lowerCAmelCase ):
    """Primality test via trial division over 6k +/- 1 candidates."""
    n = __lowerCAmelCase
    # 2 and 3 are prime; anything below 2, or divisible by 2 or 3, is not.
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1, so only test those divisors.
    limit = int(math.sqrt(n) + 1)
    return all(n % i != 0 and n % (i + 2) != 0 for i in range(5, limit, 6))
def A__( __lowerCAmelCase = 1_00_01 ):
    """Return the n-th prime number (Project Euler problem 7).

    Args:
        __lowerCAmelCase: 1-based index of the prime to return (default 10001).

    Raises:
        TypeError: if the argument cannot be cast to int.
        ValueError: if the argument is less than one.
    """

    def _is_prime(number: int) -> bool:
        # Trial division over 6k +/- 1 candidates (self-contained: the sibling
        # primality helper in this file is shadowed by later definitions).
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(number) + 1), 6):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    try:
        nth = int(__lowerCAmelCase)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if _is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
    # The nth-prime function above is named ``A__`` in this file.
    print(F'''{A__() = }''')
| 652 | 1 |
from __future__ import annotations
from math import pow, sqrt
def A__( resistance , reactance , impedance ):
    """Solve for the missing quantity in Z^2 = R^2 + X^2.

    Exactly one argument must be 0 (the unknown); a dict mapping that
    quantity's name to its computed value is returned.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 652 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
    """FSNER few-shot NER model: a BERT encoder plus start/end span scoring.

    Method names restored to the ones the code itself calls (``forward`` uses
    ``self.BERT``); the obfuscated version defined every method under the same
    name, so only the last one survived.
    """

    def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
        """Load the pretrained BERT backbone named by ``lowerCamelCase_``."""
        super().__init__()
        self.bert = AutoModel.from_pretrained(lowerCamelCase_, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT( self : int , **lowerCamelCase_ : List[str] ):
        """Run the backbone and return its last hidden state."""
        return self.bert(**lowerCamelCase_).last_hidden_state

    def VectorSum( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
        """Sum token embeddings along axis 2, keeping the dimension."""
        return lowerCamelCase_.sum(2, keepdim=True)

    def Atten( self : Dict , q_rep : Dict , S_rep : Tuple , T : Optional[Any] = 1 ):
        """Softmax over temperature-scaled cosine similarity of query vs support reps."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward( self : Optional[Any] , W_query : Any , W_supports : Union[str, Any] ):
        """Score start/end token positions of entity spans.

        ``W_supports`` carries per-entity support sizes plus the special
        start/end token ids; returns ``(p_starts, p_ends)`` probabilities.
        """
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 652 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# pytest only honors module-level marks bound to the name ``pytestmark``.
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( path , tmp_path ):
    """inspect_dataset must copy the dataset script (but no caches) into ``tmp_path``."""
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( path , tmp_path ):
    """inspect_metric must copy the metric script (but no caches) into ``tmp_path``."""
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def A__( path , config_name , expected_splits ):
    """get_dataset_config_info must report the right config name and splits."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def A__( path , config_name , expected_exception ):
    """A multi-config dataset without an explicit config name must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def A__( path , expected ):
    """get_dataset_config_names must include the expected config for each dataset."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def A__( path , expected_configs , expected_splits_in_first_config ):
    """get_dataset_infos must enumerate all configs and the first config's splits."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def A__( path , expected_config , expected_splits ):
    """get_dataset_infos must contain the expected config with the expected splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def A__( path , config_name , expected_exception ):
    """get_dataset_split_names without a config name must raise on multi-config datasets."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 652 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """T5-style encoder over note tokens with a learned-but-frozen position table.

    The obfuscated original listed the same base class three times (a runtime
    ``TypeError``) and discarded every ``self.`` attribute assignment that the
    forward pass reads; both are restored here.
    """

    @register_to_config
    def __init__( self : Any , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        # Positions are a fixed table; do not train them.
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward( self : Optional[int] , encoder_input_tokens : int , encoder_inputs_mask : Tuple ):
        """Encode the tokens; returns ``(encoded, encoder_inputs_mask)``."""
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 652 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
# Common kwargs understood by every PretrainedConfig, each set to a
# non-default value so round-trip tests can detect dropped attributes.
lowercase_ : Optional[Any] = {
    '''return_dict''': False,
    '''output_hidden_states''': True,
    '''output_attentions''': True,
    '''torchscript''': True,
    '''torch_dtype''': '''float16''',
    '''use_bfloat16''': True,
    '''tf_legacy_loss''': True,
    '''pruned_heads''': {'''a''': 1},
    '''tie_word_embeddings''': False,
    '''is_decoder''': True,
    '''cross_attention_hidden_size''': 128,
    '''add_cross_attention''': True,
    '''tie_encoder_decoder''': True,
    '''max_length''': 50,
    '''min_length''': 3,
    '''do_sample''': True,
    '''early_stopping''': True,
    '''num_beams''': 3,
    '''num_beam_groups''': 3,
    '''diversity_penalty''': 0.5,
    '''temperature''': 2.0,
    '''top_k''': 10,
    '''top_p''': 0.7,
    '''typical_p''': 0.2,
    '''repetition_penalty''': 0.8,
    '''length_penalty''': 0.8,
    '''no_repeat_ngram_size''': 5,
    '''encoder_no_repeat_ngram_size''': 5,
    '''bad_words_ids''': [1, 2, 3],
    '''num_return_sequences''': 3,
    '''chunk_size_feed_forward''': 5,
    '''output_scores''': True,
    '''return_dict_in_generate''': True,
    '''forced_bos_token_id''': 2,
    '''forced_eos_token_id''': 3,
    '''remove_invalid_values''': True,
    '''architectures''': ['''BertModel'''],
    '''finetuning_task''': '''translation''',
    '''id2label''': {0: '''label'''},
    '''label2id''': {'''label''': '''0'''},
    '''tokenizer_class''': '''BertTokenizerFast''',
    '''prefix''': '''prefix''',
    '''bos_token_id''': 6,
    '''pad_token_id''': 7,
    '''eos_token_id''': 8,
    '''sep_token_id''': 9,
    '''decoder_start_token_id''': 10,
    '''exponential_decay_length_penalty''': (5, 1.01),
    '''suppress_tokens''': [0, 1],
    '''begin_suppress_tokens''': 2,
    '''task_specific_params''': {'''translation''': '''some_params'''},
    '''problem_type''': '''regression''',
}
@is_staging_test
class lowercase ( unittest.TestCase ):
    """Push-to-hub round-trip tests for configurations (staging endpoint).

    unittest only recognizes ``setUpClass``/``tearDownClass`` hooks and
    ``test_*`` methods by name, so the obfuscated identical method names are
    restored; ``cls._token`` is set so teardown can delete the test repos.
    """

    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass( cls ):
        # Best-effort cleanup of repos created by the tests below.
        try:
            delete_repo(token=cls._token , repo_id='test-config' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-config' )
        except HTTPError:
            pass

    def test_push_to_hub( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('test-config' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='test-config' , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

    def test_push_to_hub_in_organization( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

    def test_push_to_hub_dynamic_config( self ):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
        self.assertEqual(new_config.attribute , 42 )
# NOTE(review): this whole test class is identifier-mangled and broken as-is:
# every method is named ``__UpperCAmelCase`` (each def shadows the previous one,
# and none start with ``test_`` so unittest would not collect them), locals are
# assigned to ``_snake_case`` but read back under their original names
# (``c``, ``base_config``, ``configuration``, ...), and ``lowerCamelCase_`` is
# referenced inside methods that take only ``self``.  Comments below flag the
# apparent intent; confirm against the upstream configuration tests.
class lowercase ( unittest.TestCase ):
"""simple docstring"""
# Intent: update int/float/bool/str fields of a GPT2 config via update_from_string.
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_snake_case : Dict = c.n_embd + 1 # int
_snake_case : Union[str, Any] = c.resid_pdrop + 1.0 # float
_snake_case : int = not c.scale_attn_weights # bool
_snake_case : Dict = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_ , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_ , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_ , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_ , c.summary_type , 'mismatch for key: summary_type' )
# Intent: ensure PretrainedConfig's defaults stay in sync with config_common_kwargs.
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = PretrainedConfig()
_snake_case : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_ , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_snake_case : int = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_ , lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
# Intent: loading from a repo with the config in a subfolder requires `subfolder=`.
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
_snake_case : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_snake_case : Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
# Intent: offline/500-error path still issues a HEAD request and uses the cache.
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = mock.Mock()
_snake_case : Optional[int] = 5_00
_snake_case : Any = {}
_snake_case : List[str] = HTTPError
_snake_case : Any = {}
# Download this model to make sure it's in the cache.
_snake_case : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowerCamelCase_ ) as mock_head:
_snake_case : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
# Intent: a config can be loaded directly from a resolve URL.
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
# Intent: versioned configuration-file selection (config.X.Y.Z.json) by
# comparing against the installed transformers version.
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = AutoConfig.from_pretrained('bert-base-cased' )
_snake_case : Union[str, Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
_snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase_ , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_snake_case : int = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_snake_case : List[str] = ['config.42.0.0.json']
_snake_case : Dict = 7_68
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_ , 'config.4.0.0.json' ) , os.path.join(lowerCamelCase_ , 'config.42.0.0.json' ) )
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
# Intent: the configuration file picked depends on the (monkey-patched)
# transformers version the loading module reports.
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_snake_case : Union[str, Any] = 'v4.0.0'
_snake_case , _snake_case : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_snake_case : Tuple = 'v3.0.0'
_snake_case : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 652 |
def A__(column_title):
    """Convert an Excel-style column title to its 1-based column number.

    'A' -> 1, 'Z' -> 26, 'AA' -> 27, 'ZZ' -> 702, ...

    Fixes the original, whose parameter was mangled to ``__lowerCAmelCase``
    while the body referenced ``column_title`` (a NameError at call time).
    Raises AssertionError (as before) if the title is not all uppercase.
    """
    assert column_title.isupper()
    answer = 0
    for letter in column_title:
        # Base-26 accumulation; 'A' (ord 65) contributes 1, ..., 'Z' contributes 26.
        answer = answer * 26 + (ord(letter) - 64)
    return answer
# Run any doctest examples in this module when executed as a script.
if __name__ == "__main__":
from doctest import testmod
testmod()
| 652 | 1 |
from jiwer import compute_measures
import datasets
lowercase_ : Union[str, Any] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowercase_ : Any = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowercase_ : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Word error rate (WER) metric backed by ``jiwer``.

    Fixes in this revision:
    * Both methods were named ``__UpperCAmelCase`` (the second silently shadowed
      the first); restored to the ``_info`` / ``_compute`` hooks that
      ``datasets.Metric`` actually dispatches to.
    * ``_compute`` declared three parameters under one duplicated name (a
      SyntaxError); restored to ``predictions`` / ``references`` /
      ``concatenate_texts``.
    NOTE(review): the decorator references ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``
    but the module-level string constants above were mangled to ``lowercase_``;
    restore their names as well for this module to import.
    """

    def _info(self):
        """Describe the metric: citation, input feature schema and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER as a float.

        jiwer's ``compute_measures(truth, hypothesis)`` takes the reference
        first, then the prediction.
        """
        if concatenate_texts:
            # Score the whole corpus in one call.
            return compute_measures(references, predictions)["wer"]
        # Otherwise accumulate error and reference-word counts per segment.
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 652 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Translation Edit Rate (TER) metric backed by sacrebleu.

    Fixes in this revision:
    * Both methods were named ``__UpperCAmelCase`` (the second shadowed the
      first); restored to the ``_info`` / ``_compute`` hooks ``datasets.Metric``
      dispatches to.
    * ``_compute`` declared several parameters under duplicated mangled names
      (a SyntaxError); restored to the documented parameter list.
    NOTE(review): ``_DESCRIPTION`` / ``_CITATION`` / ``_KWARGS_DESCRIPTION`` are
    referenced here but the module constants above were mangled to ``lowercase_``;
    restore their names as well for this module to import.
    """

    def _info(self):
        """Validate the sacrebleu version and describe the metric schema."""
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='http://www.cs.umd.edu/~snover/tercom/',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],
            reference_urls=[
                'https://github.com/jhclark/tercom',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        """Return {'score', 'num_edits', 'ref_length'} for the given corpus.

        ``references`` is a list (one entry per prediction) of equal-length
        reference lists; it is transposed into sacrebleu's expected
        one-stream-per-reference layout before scoring.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        # Transpose: [pred][ref_idx] -> [ref_idx][pred].
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module scaffolding for the CANINE model: declares the submodule ->
# exported-names map, then defers actual imports to _LazyModule (except under
# TYPE_CHECKING, where real imports run for static type checkers).
# NOTE(review): the map and the export list below were mangled to ``lowercase_``
# (the list assignment never extends the map), yet _LazyModule is called with
# ``_import_structure`` — undefined as written; restore the original names.
lowercase_ : Any = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
# Only expose the torch modeling symbols when torch is installed.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
# Static type checkers see the eager imports.
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
# At runtime, replace this module with a lazy proxy.
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
# NOTE(review): both sentinel objects below were mangled to the same name
# ``lowercase_`` (the second assignment shadows the first).  Downstream code
# references ``_unmatched``, which no longer exists — these were presumably
# ``_unmatched`` and ``empty_dict``-style sentinels originally; restore them.
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def A__(qs, ks):
    """Return True if the pattern sequence ``qs`` matches a contiguous window of ``ks``.

    Each pattern is anchored with a trailing ``$`` and applied with
    ``re.match``, so it must match an entire element of ``ks``.  An empty
    ``qs`` never matches (the window list is empty and falsy).

    Fixes the original signature, which declared both parameters under one
    duplicated mangled name (a SyntaxError) while the body referenced
    ``qs``/``ks``.
    """
    compiled = tuple(re.compile(x + '$') for x in qs)
    for start in range(len(ks) - len(qs) + 1):
        # Match the patterns against the window of keys beginning at `start`.
        window = [pattern.match(key) for pattern, key in zip(compiled, ks[start:])]
        if window and all(window):
            return True
    return False
def A__(rules):
    """Build a ``replace(key, val)`` function from (rule, replacement) pairs.

    The returned callable scans ``rules`` in order and returns the replacement
    of the first rule whose pattern sequence matches ``key`` (via ``_match``);
    if nothing matches, ``val`` is returned unchanged.

    Fixes the original, whose outer parameter was mangled while the body
    referenced ``rules``, and whose inner ``replace`` declared both parameters
    under one duplicated name (a SyntaxError).
    NOTE(review): ``_match`` is the sibling matcher in this module, which the
    mangling also renamed to ``A__`` — restore its name for this to run.
    """
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
# Returns the (key-pattern, PartitionSpec) rules used to shard GPT-style
# parameters across a 'mp' (model-parallel) mesh axis; None means "no rule /
# replicate" for that parameter.
# NOTE(review): ``__lowerCAmelCase`` is not defined at module scope, so calling
# this function raises NameError as written — each occurrence presumably stands
# for the literal None (an unpartitioned axis in a PartitionSpec); confirm
# against the original partitioning script.
def A__( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
# Intent: map every leaf of a flattened parameter dict to a PartitionSpec via
# the replacement rules, assert full coverage, and return a frozen nested dict.
# NOTE(review): heavily mangled and non-functional as written — the locals are
# all assigned to ``_snake_case`` but read back as ``initd`` / ``replace`` /
# ``result``; ``_unmatched``, ``_get_partition_rules`` and ``_replacement_rules``
# no longer exist under those names in this module; and the dict comprehension
# passes ``__lowerCAmelCase`` where the original presumably passed ``k, v``.
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = _get_partition_rules()
_snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase )
_snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
# Every leaf must have been matched by some rule, otherwise the spec is incomplete.
_snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCAmelCase ) )
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-module scaffolding for the UniSpeech model: submodule -> exported names,
# with real imports only under TYPE_CHECKING; at runtime the module is replaced
# by a _LazyModule proxy.
# NOTE(review): the structure map and export list were mangled to ``lowercase_``
# (the list never extends the map) while _LazyModule is called with
# ``_import_structure`` — undefined as written; restore the original names.
lowercase_ : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
# Modeling symbols are only available when torch is installed.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Union[str, Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
# Eager imports for static type checkers only.
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
# Runtime: install the lazy proxy module.
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__(model, dirpath):
    """Save ``model`` into ``dirpath`` via ``save_pretrained``.

    If the directory already exists, stale ``config.json`` /
    ``pytorch_model.bin`` files are removed first so the checkpoint is clean;
    otherwise the directory is created.

    Fixes the original ``def A__(__lowerCAmelCase, __lowerCAmelCase)``, whose
    duplicated parameter name is a SyntaxError (the body already referenced
    ``model``).
    """
    if os.path.exists(dirpath):
        # Remove stale artifacts before re-saving.
        for filename in ('config.json', 'pytorch_model.bin'):
            target = os.path.join(dirpath, filename)
            if os.path.exists(target) and os.path.isfile(target):
                os.remove(target)
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def A__(p, unlogit=False):
    """Shannon entropy of ``p`` along its last dimension: -sum(p * log(p)).

    The indeterminate form 0 * log(0) is defined as 0.  When ``unlogit`` is
    True, ``p`` is squared first.

    Fixes the original, which declared both parameters under one duplicated
    mangled name (a SyntaxError) and returned an undefined ``plogp``.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # log(0) is -inf and 0 * -inf is nan; mask those entries to 0.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def A__(tensor):
    """Log a 2-D tensor (layers x heads) through the module ``logger``, one row per line.

    Float tensors print with 5 decimals; ``torch.long`` tensors print as
    integers.  The header row shows 1-based column indices.

    Fixes the original signature ``def A__(__lowerCAmelCase)``, whose body
    referenced the undefined name ``tensor``.
    """
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data))
# Intent: run the eval dataloader with a differentiable head mask, accumulating
# per-head attention entropy and gradient-based head-importance scores, then
# normalize per layer / globally and log the resulting matrices.
# NOTE(review): non-functional as mangled — the signature declares seven
# parameters under ONE duplicated name (a SyntaxError; originally presumably
# args, model, eval_dataloader, compute_entropy=True, compute_importance=True,
# head_mask=None, actually_pruned=False), and locals are assigned to
# ``_snake_case`` but read back under original names (head_mask, total_loss,
# attn_entropy, head_importance, ...).  Restore names before use.
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
# Default: an all-ones (layers x heads) mask with gradients enabled so the
# backward pass populates head_mask.grad for importance scoring.
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
# Intent: iteratively mask the least-important attention heads (masking_amount
# per step) until the 1/loss score drops below masking_threshold * original,
# then save and return the final head mask.
# NOTE(review): mangled and non-functional as written — locals assigned to
# ``_snake_case`` are read back as loss / original_score / new_head_mask /
# current_score / head_mask etc., and the masked head-importance entries set
# to float('Inf') have lost their indexed-assignment target.  Restore the
# original variable names (presumably args, model, eval_dataloader params).
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
_snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
# Keep masking while the score stays above the acceptance threshold.
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
# Persist the mask for the subsequent pruning run.
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
# Intent: score the model with the head mask applied, then physically prune the
# masked heads (model.prune_heads), re-score, and log the parameter-count and
# speed changes between the masked and pruned variants before saving.
# NOTE(review): mangled and non-functional as written — locals assigned to
# ``_snake_case`` are read back as loss / before_time / heads_to_prune /
# original_num_params / pruned_num_params / original_time / new_time, and the
# compute_heads_importance keyword values lost their True/False/mask arguments.
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
# Map each layer index to the list of head indices that are masked (0) there.
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
# .squeeze() collapses a single pruned head to a scalar; re-wrap it as a list.
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
# Re-evaluate with the heads physically removed (actually_pruned avoids shape mismatch).
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Standard script entry point: run main() only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 652 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

# NOTE(review): in the previous version every module-level binding had been
# renamed to a single `lowercase_` variable while later code still referenced
# the original names (`vocab`, `merges`, `build_dir`, `tokenizer`, ...), so the
# script died with NameError on first use.  Names are restored below; the
# literal values are unchanged.

# Directory/repo name the tiny model and tokenizer files are written to.
mname_tiny = 'tiny-wmt19-en-ru'

# Build
# borrowed from a test: a deliberately tiny BPE vocabulary.
vocab = [
    'l',
    'o',
    'w',
    'e',
    'r',
    's',
    't',
    'i',
    'd',
    'n',
    'w</w>',
    'r</w>',
    't</w>',
    'lo',
    'low',
    'er</w>',
    'low</w>',
    'lowest</w>',
    'newer</w>',
    'wider</w>',
    '<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))

    # The tokenizer reads the vocab/merges files eagerly, so it must be built
    # while the temporary directory still exists (the previous version created
    # it after the `with` block, i.e. after the files had been deleted).
    tokenizer = FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')

# Test: run one forward pass end to end.
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 652 |
def A__( __lowerCAmelCase ):
    """Return the largest number obtainable by deleting exactly one digit
    from the absolute value of ``__lowerCAmelCase``.

    >>> A__(123)
    23
    >>> A__(2736)
    736

    Raises:
        TypeError: if the input is not an integer.

    The previous version was unrunnable: it called ``isinstance(x, x)``
    (always a TypeError), popped the *argument* instead of the digit index,
    and built the result from the argument instead of each candidate list.
    """
    if not isinstance(__lowerCAmelCase, int):
        raise TypeError('only integers accepted as input')
    num_string = str(abs(__lowerCAmelCase))
    # One candidate per digit position: a copy of the digit list with that
    # position removed.
    candidates = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        candidates[index].pop(index)
    return max(int(''.join(digits)) for digits in candidates)


if __name__ == "__main__":
    __import__('doctest').testmod()
| 652 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowercase_ : List[str] = logging.getLogger(__name__)
@dataclass
class lowercase :
    """A single training/test example for token classification.

    NOTE(review): identifiers look machine-mangled -- all three fields share
    the name ``_UpperCamelCase`` (so only the last assignment survives) and
    the placeholder annotations/defaults carry no information.  Presumably
    this was ``InputExample`` with ``guid``/``words``/``labels`` fields, as
    suggested by the ``InputExample(guid=..., words=..., labels=...)`` calls
    later in the file -- confirm against the original source.
    """

    # Only the last of these identically-named fields takes effect.
    _UpperCamelCase : Union[str, Any] = 42
    _UpperCamelCase : Optional[int] = 42
    _UpperCamelCase : Optional[int] = 42
@dataclass
class lowercase :
    """A single set of model-ready features for token classification.

    NOTE(review): identifiers look machine-mangled -- all four fields share
    one name, so only the last binding survives.  The ``InputFeatures(
    input_ids=..., attention_mask=..., token_type_ids=..., label_ids=...)``
    call later in the file suggests those were the original field names --
    confirm against the original source.
    """

    # Only the last of these identically-named fields takes effect.
    _UpperCamelCase : Tuple = 42
    _UpperCamelCase : Dict = 42
    _UpperCamelCase : Optional[int] = None
    _UpperCamelCase : List[Any] = None
class lowercase ( _lowerCamelCase ):
    """Dataset split selector with the values "train" / "dev" / "test".

    NOTE(review): the base class ``_lowerCamelCase`` is not defined in this
    file, and the three members all share one mangled name (only the last
    binding, "test", survives).  Presumably this was an ``enum.Enum`` named
    ``Split`` -- confirm against the original source.
    """

    _UpperCamelCase : Dict = "train"
    _UpperCamelCase : Union[str, Any] = "dev"
    _UpperCamelCase : str = "test"
class lowercase :
    """Base class describing a token-classification task (NER/chunking/POS).

    NOTE(review): this class is machine-mangled and cannot run as written:
    all three static methods share the name ``__UpperCAmelCase`` (only the
    last survives), the long method below declares many parameters with the
    *same* name ``lowerCamelCase_`` (a SyntaxError), and its body references
    names (``label_map``, ``tokens``, ``label_ids``, ...) whose assignments
    were redirected to a throwaway ``_snake_case`` variable.  Code is kept
    byte-identical here; only documentation was added.
    """

    @staticmethod
    def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
        '''Read task examples from a data file; must be overridden by subclasses.'''
        raise NotImplementedError

    @staticmethod
    def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] ):
        '''Return the task's label set; must be overridden by subclasses.'''
        raise NotImplementedError

    @staticmethod
    def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : Any=False , lowerCamelCase_ : List[Any]="[CLS]" , lowerCamelCase_ : Optional[int]=1 , lowerCamelCase_ : Optional[int]="[SEP]" , lowerCamelCase_ : Dict=False , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : Optional[int]=0 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : Dict=-1_00 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : Union[str, Any]=True , ):
        '''Convert examples to tokenized, padded model features.

        Tokenizes each word, assigns the real label id only to a word's first
        sub-token (padding label ids elsewhere), adds [CLS]/[SEP] per the
        model convention, then pads/truncates everything to the maximum
        sequence length.
        '''
        _snake_case : Any = {label: i for i, label in enumerate(A__ )}
        _snake_case : int = []
        for ex_index, example in enumerate(A__ ):
            if ex_index % 1_00_00 == 0:
                logger.info('Writing example %d of %d' , A__ , len(A__ ) )
            _snake_case : List[str] = []
            _snake_case : Union[str, Any] = []
            for word, label in zip(example.words , example.labels ):
                _snake_case : Union[str, Any] = tokenizer.tokenize(A__ )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(A__ ) > 0:
                    tokens.extend(A__ )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A__ ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            _snake_case : List[Any] = tokenizer.num_special_tokens_to_add()
            if len(A__ ) > max_seq_length - special_tokens_count:
                _snake_case : Optional[Any] = tokens[: (max_seq_length - special_tokens_count)]
                _snake_case : List[str] = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            _snake_case : Optional[int] = [sequence_a_segment_id] * len(A__ )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                _snake_case : List[str] = [cls_token] + tokens
                _snake_case : str = [pad_token_label_id] + label_ids
                _snake_case : Dict = [cls_token_segment_id] + segment_ids
            _snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(A__ )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            _snake_case : Optional[Any] = [1 if mask_padding_with_zero else 0] * len(A__ )
            # Zero-pad up to the sequence length.
            _snake_case : Tuple = max_seq_length - len(A__ )
            if pad_on_left:
                _snake_case : Any = ([pad_token] * padding_length) + input_ids
                _snake_case : Tuple = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                _snake_case : List[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
                _snake_case : Optional[int] = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(A__ ) == max_seq_length
            assert len(A__ ) == max_seq_length
            assert len(A__ ) == max_seq_length
            assert len(A__ ) == max_seq_length
            if ex_index < 5:
                logger.info('*** Example ***' )
                logger.info('guid: %s' , example.guid )
                logger.info('tokens: %s' , ' '.join([str(A__ ) for x in tokens] ) )
                logger.info('input_ids: %s' , ' '.join([str(A__ ) for x in input_ids] ) )
                logger.info('input_mask: %s' , ' '.join([str(A__ ) for x in input_mask] ) )
                logger.info('segment_ids: %s' , ' '.join([str(A__ ) for x in segment_ids] ) )
                logger.info('label_ids: %s' , ' '.join([str(A__ ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                _snake_case : Any = None
            features.append(
                InputFeatures(
                    input_ids=A__ , attention_mask=A__ , token_type_ids=A__ , label_ids=A__ ) )
        return features
# Import torch-dependent pieces only when PyTorch is installed, so this
# module stays importable in a TensorFlow-only environment.
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset
class lowercase ( _lowerCamelCase ):
    """PyTorch ``Dataset`` of token-classification features, cached on disk.

    NOTE(review): machine-mangled and unrunnable as written: ``__init__``
    declares several parameters all named ``lowerCamelCase_`` (a SyntaxError)
    and its body references names (``mode``, ``tokenizer``, ``data_dir``,
    ``cached_features_file``, ...) whose assignments were redirected to a
    throwaway ``_snake_case``.  The base class ``_lowerCamelCase`` is also
    undefined here.  Code kept byte-identical; only documentation added.
    """

    # First field is mangled; the second is the CrossEntropyLoss ignore
    # index used as the padding label id.
    _UpperCamelCase : Tuple = 42
    _UpperCamelCase : Union[str, Any] = nn.CrossEntropyLoss().ignore_index
    def __init__( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : int = None , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str] = Split.train , ):
        '''Load features from the on-disk cache, or build and cache them.'''
        _snake_case : Optional[Any] = os.path.join(
            A__ , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(A__ ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _snake_case : Any = cached_features_file + '.lock'
        with FileLock(A__ ):
            if os.path.exists(A__ ) and not overwrite_cache:
                logger.info(f'''Loading features from cached file {cached_features_file}''' )
                _snake_case : int = torch.load(A__ )
            else:
                logger.info(f'''Creating features from dataset file at {data_dir}''' )
                _snake_case : Optional[int] = token_classification_task.read_examples_from_file(A__ , A__ )
                # TODO clean up all this to leverage built-in features of tokenizers
                _snake_case : Dict = token_classification_task.convert_examples_to_features(
                    A__ , A__ , A__ , A__ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A__ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(f'''Saving features into cached file {cached_features_file}''' )
                torch.save(self.features , A__ )
    def __len__( self : Union[str, Any] ):
        '''Number of feature records in the dataset.'''
        return len(self.features )
    def __getitem__( self : str , lowerCamelCase_ : int ):
        '''Return one feature record.  NOTE(review): uses undefined name ``i``.'''
        return self.features[i]
# Import TensorFlow only when it is installed.
if is_tf_available():
    import tensorflow as tf
class lowercase :
    """TensorFlow dataset of token-classification features built via a generator.

    NOTE(review): machine-mangled and unrunnable as written: ``__init__``
    declares several parameters all named ``lowerCamelCase_`` (a SyntaxError)
    and its body references names (``model_type``, ``tokenizer``, ...) whose
    assignments were redirected to a throwaway ``_snake_case``.  Code kept
    byte-identical; only documentation added.
    """

    # Mangled field plus the padding label id (-100, the usual ignore index).
    _UpperCamelCase : str = 42
    _UpperCamelCase : str = -100
    def __init__( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Union[str, Any] = Split.train , ):
        '''Read examples, convert them to features, and wrap them in a tf.data.Dataset.'''
        _snake_case : Dict = token_classification_task.read_examples_from_file(A__ , A__ )
        # TODO clean up all this to leverage built-in features of tokenizers
        _snake_case : Tuple = token_classification_task.convert_examples_to_features(
            A__ , A__ , A__ , A__ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A__ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
        # Generator yielding (inputs, labels) pairs; the dict shape depends on
        # whether the tokenizer produces token_type_ids.
        def gen():
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )
        if "token_type_ids" not in tokenizer.model_input_names:
            _snake_case : Any = tf.data.Dataset.from_generator(
                A__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
                    {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            _snake_case : Dict = tf.data.Dataset.from_generator(
                A__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
                    {
                        'input_ids': tf.TensorShape([None] ),
                        'attention_mask': tf.TensorShape([None] ),
                        'token_type_ids': tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def __UpperCAmelCase ( self : Tuple ):
        '''Return the dataset with its cardinality asserted to len(self.features).'''
        _snake_case : Union[str, Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
    def __len__( self : Dict ):
        '''Number of feature records.'''
        return len(self.features )
    def __getitem__( self : List[Any] , lowerCamelCase_ : Any ):
        '''Return one feature record.  NOTE(review): uses undefined name ``i``.'''
        return self.features[i]
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
    """CoNLL-2003-style named-entity-recognition task.

    NOTE(review): machine-mangled and unrunnable as written: two methods
    declare duplicate ``lowerCamelCase_`` parameters (a SyntaxError), all
    three methods share the name ``__UpperCAmelCase`` (only the last would
    survive), and bodies reference names (``label_idx``, ``mode``, ``words``,
    ``labels``, ``writer``, ``path``...) whose assignments were redirected to
    a throwaway ``_snake_case``.  Code kept byte-identical; docs only.
    """

    def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
        '''Remember which whitespace-separated column holds the label (default: last).'''
        _snake_case : str = label_idx
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
        '''Parse `{mode}.txt` in CoNLL format into InputExample objects.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : Union[str, Any] = mode.value
        _snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Dict = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            _snake_case : List[Any] = []
            _snake_case : int = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                        guid_index += 1
                        _snake_case : Optional[Any] = []
                        _snake_case : Union[str, Any] = []
                else:
                    _snake_case : Tuple = line.split(' ' )
                    words.append(splits[0] )
                    if len(lowerCamelCase_ ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
        return examples
    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Write per-token predictions next to the input tokens, CoNLL-style.'''
        _snake_case : str = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(lowerCamelCase_ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                _snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(lowerCamelCase_ )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
        '''Return the label set, read from a file if given, else the default CoNLL NER tags.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : Optional[int] = f.read().splitlines()
            if "O" not in labels:
                _snake_case : Optional[int] = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
    """Token-classification task for CoNLL-2000-style text chunking.

    The chunk label lives in the second-to-last whitespace-separated column
    of each data line, hence ``label_idx=-2`` passed to the base task.
    """

    def __init__( self : Optional[int] ):
        """Configure the base task to read labels from the second-to-last column."""
        super().__init__(label_idx=-2 )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
        """Return the label set for chunking.

        Args:
            lowerCamelCase_: optional path to a file with one label per line.
                When falsy, the default CoNLL-2000 chunk tag set is returned.

        Fix: the previous body referenced the undefined names ``path`` and
        ``labels`` (assignments went to a throwaway ``_snake_case``), so it
        raised ``NameError`` whenever a path was supplied; it now uses the
        actual parameter and a real local variable.
        """
        if lowerCamelCase_:
            with open(lowerCamelCase_ , 'r' ) as f:
                labels = f.read().splitlines()
            # Always expose the "outside" tag, even if the file omits it.
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowercase ( a_ ):
    """Part-of-speech tagging task over CoNLL-U files (via ``conllu.parse_incr``).

    NOTE(review): machine-mangled and unrunnable as written: two methods
    declare duplicate ``lowerCamelCase_`` parameters (a SyntaxError), all
    three methods share the name ``__UpperCAmelCase``, and bodies reference
    names (``mode``, ``words``, ``labels``, ``out``, ``s_p``, ``path``...)
    whose assignments were redirected to a throwaway ``_snake_case``.
    Code kept byte-identical; only documentation added.
    """

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
        '''Parse `{mode}.txt` in CoNLL-U format into InputExample objects with UPOS labels.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : str = mode.value
        _snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Tuple = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            for sentence in parse_incr(lowerCamelCase_ ):
                _snake_case : List[str] = []
                _snake_case : str = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                    guid_index += 1
        return examples
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Write "form (gold|pred)" tokens per sentence to the output writer.'''
        _snake_case : Dict = 0
        for sentence in parse_incr(lowerCamelCase_ ):
            _snake_case : Optional[int] = preds_list[example_id]
            _snake_case : List[Any] = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(lowerCamelCase_ )
            example_id += 1
    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
        '''Return the label set, read from a file if given, else the Universal POS tags.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 652 | 0 |
import numpy as np
from PIL import Image
def A__( arr , size , stride ):
    """2D max pooling over a square matrix.

    Args:
        arr: square 2D array-like of values (e.g. image pixels).
        size: side length of the (square) pooling window.
        stride: step, in elements, between successive windows.

    Returns:
        numpy array of shape ``((n - size) // stride + 1,) * 2`` holding the
        maximum of each window.

    Raises:
        ValueError: if ``arr`` is not square.

    Fix: the previous version was unrunnable -- all three parameters shared
    one name (a SyntaxError), the loop counters were never bound (every
    assignment targeted a throwaway ``_snake_case``), and nothing was ever
    written into the output matrix.  Parameter names are restored from the
    body's own references (``size``/``stride``) and the keyword call style
    used in this file's ``__main__`` block.
    """
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def A__( arr , size , stride ):
    """2D average pooling over a square matrix.

    Args:
        arr: square 2D array-like of values (e.g. image pixels).
        size: side length of the (square) pooling window.
        stride: step, in elements, between successive windows.

    Returns:
        numpy array of shape ``((n - size) // stride + 1,) * 2`` holding the
        truncated-int average of each window.

    Raises:
        ValueError: if ``arr`` is not square.

    Fix: the previous version was unrunnable -- all three parameters shared
    one name (a SyntaxError), the loop counters were never bound (every
    assignment targeted a throwaway ``_snake_case``), and nothing was ever
    written into the output matrix.  Parameter names are restored from the
    body's own references (``size``/``stride``) and the keyword call style
    used in this file's ``__main__`` block.
    """
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window (truncated toward zero)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
# Demo entry point: run doctests, then show max- and average-pooled versions
# of an image.
# NOTE(review): unrunnable as written -- the opened image is bound to
# `lowercase_` while the calls below reference `image`, and the functions
# `maxpooling`/`avgpooling` do not exist in this module (both pooling
# functions above are named `A__`).  Also 'path_to_image' is a placeholder
# path.  Code kept byte-identical; only comments added.
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='''avgpooling''', verbose=True)

    # Loading the image
    lowercase_ : Optional[int] = Image.open('''path_to_image''')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix

    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix

    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Fast smoke tests for the safe Stable Diffusion pipeline built from tiny dummy components.

    NOTE(review): machine-mangled and unrunnable as written: every method is
    named ``__UpperCAmelCase`` (later defs shadow earlier ones), the nested
    ``extract`` helper declares ``*args``/``**kwargs`` with the same name
    (a SyntaxError), and method bodies reference names (``image``, ``model``,
    ``sd_pipe``, ``output``, ...) whose assignments were redirected to a
    throwaway ``_snake_case``.  Code kept byte-identical; docs only.
    """
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Release Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Deterministic random dummy image tensor (batch=1, channels=3, 32x32).'''
        _snake_case : Tuple = 1
        _snake_case : str = 3
        _snake_case : List[str] = (32, 32)
        _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
        return image
    @property
    def __UpperCAmelCase ( self : str ):
        '''Tiny conditional UNet used in place of the full model.'''
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model
    @property
    def __UpperCAmelCase ( self : Dict ):
        '''Tiny VAE used in place of the full autoencoder.'''
        torch.manual_seed(0 )
        _snake_case : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
    @property
    def __UpperCAmelCase ( self : Tuple ):
        '''Tiny CLIP text encoder used in place of the full model.'''
        torch.manual_seed(0 )
        _snake_case : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(lowerCamelCase_ )
    @property
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Stand-in feature extractor returning an object with empty pixel_values.'''
        def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
            class lowercase :
                """Minimal stand-in exposing a ``pixel_values`` tensor."""
                def __init__( self : Tuple ):
                    '''Start with an empty pixel_values tensor.'''
                    _snake_case : List[str] = torch.ones([0] )
                def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
                    '''Move pixel_values to a device and return self (mimics .to()).'''
                    self.pixel_values.to(lowerCamelCase_ )
                    return self
            return Out()
        return extract
    def __UpperCAmelCase ( self : int ):
        '''End-to-end generation with the DDIM scheduler; checks output shape and a pixel slice.'''
        _snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _snake_case : int = self.dummy_cond_unet
        _snake_case : str = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Union[str, Any] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : str = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[str] = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Union[str, Any] = output.images
        _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Tuple = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : List[str] ):
        '''End-to-end generation with the PNDM scheduler; checks output shape and a pixel slice.'''
        _snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _snake_case : List[str] = self.dummy_cond_unet
        _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : int = self.dummy_vae
        _snake_case : List[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Any = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : str = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Optional[Any] = output.images
        _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : int ):
        '''Pipeline with safety_checker=None still generates and round-trips through save/load.'''
        _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert isinstance(pipe.scheduler , lowerCamelCase_ )
        assert pipe.safety_checker is None
        _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase_ )
            _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''fp16 variant of the pipeline runs and produces the expected output shape (GPU only).'''
        _snake_case : Union[str, Any] = self.dummy_cond_unet
        _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : Any = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # put models in fp16
        _snake_case : str = unet.half()
        _snake_case : Union[str, Any] = vae.half()
        _snake_case : Dict = bert.half()
        # make sure here that pndm scheduler skips prk
        _snake_case : List[str] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Tuple = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
from functools import lru_cache
@lru_cache
def A__( __lowerCAmelCase ):
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
import functools
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# Validation
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(__lowerCAmelCase ) == 0:
return 0
if min(__lowerCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(__lowerCAmelCase ) >= 3_66:
raise ValueError('All days elements should be less than 366' )
_snake_case : Optional[int] = set(__lowerCAmelCase )
@functools.cache
def dynamic_programming(__lowerCAmelCase ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , __lowerCAmelCase )
_snake_case : Optional[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
_snake_case : List[Any] = dataset_size < in_memory_max_size
else:
_snake_case : List[Any] = False
_snake_case : Any = is_small_dataset(__lowerCAmelCase )
assert result == expected
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 652 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : Dict = {'''vocab_file''': '''spiece.model'''}
lowercase_ : Union[str, Any] = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
lowercase_ : Tuple = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
lowercase_ : List[str] = 0
lowercase_ : List[str] = 1
lowercase_ : List[str] = 2
lowercase_ : Tuple = 3
lowercase_ : Optional[int] = 4
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = "left"
def __init__( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=False , lowerCamelCase_ : int=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : Optional[Any]="<unk>" , lowerCamelCase_ : int="<sep>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : Optional[int]="<cls>" , lowerCamelCase_ : int="<mask>" , lowerCamelCase_ : Optional[int]=["<eop>", "<eod>"] , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
_snake_case : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
_snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
_snake_case : List[Any] = 3
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = remove_space
_snake_case : Union[str, Any] = keep_accents
_snake_case : Optional[Any] = vocab_file
_snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return len(self.sp_model )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
'''simple docstring'''
_snake_case : int = self.__dict__.copy()
_snake_case : Optional[int] = None
return state
def __setstate__( self : List[Any] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_snake_case : int = {}
_snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
if self.remove_space:
_snake_case : Optional[int] = ' '.join(inputs.strip().split() )
else:
_snake_case : List[str] = inputs
_snake_case : int = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
_snake_case : Tuple = unicodedata.normalize('NFKD' , UpperCamelCase_ )
_snake_case : Any = ''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] )
if self.do_lower_case:
_snake_case : int = outputs.lower()
return outputs
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : Tuple = self.preprocess_text(UpperCamelCase_ )
_snake_case : Dict = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
_snake_case : List[Any] = []
for piece in pieces:
if len(UpperCamelCase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_snake_case : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_snake_case : Optional[int] = cur_pieces[1:]
else:
_snake_case : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase_ )
else:
new_pieces.append(UpperCamelCase_ )
return new_pieces
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCamelCase_ )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Any ):
'''simple docstring'''
_snake_case : List[Any] = ''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ' ' ).strip()
return out_string
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = None , lowerCamelCase_ : bool = True , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
_snake_case : Union[str, Any] = kwargs.pop('use_source_tokenizer' , UpperCamelCase_ )
_snake_case : int = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_snake_case : List[Any] = []
_snake_case : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
_snake_case : Optional[int] = []
sub_texts.append(UpperCamelCase_ )
else:
current_sub_text.append(UpperCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_snake_case : Tuple = ''.join(UpperCamelCase_ )
_snake_case : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_snake_case : List[str] = self.clean_up_tokenization(UpperCamelCase_ )
return clean_text
else:
return text
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Any = [self.sep_token_id]
_snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1]
return ([0] * len(UpperCamelCase_ )) + [1, 1]
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case : List[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , 'wb' ) as fi:
_snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
| 704 |
from math import factorial
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_snake_case : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case : List[Any] = float(factorial(__lowerCAmelCase ) )
coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase ( lowercase__ , lowercase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int = 7_68 , ):
'''simple docstring'''
super().__init__()
_snake_case : List[Any] = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) )
_snake_case : int = nn.Parameter(torch.ones(1 , __lowerCamelCase ) )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Optional[Union[str, torch.device]] = None , lowerCamelCase_ : Optional[torch.dtype] = None , ):
'''simple docstring'''
_snake_case : Dict = nn.Parameter(self.mean.to(__lowerCamelCase ).to(__lowerCamelCase ) )
_snake_case : Union[str, Any] = nn.Parameter(self.std.to(__lowerCamelCase ).to(__lowerCamelCase ) )
return self
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = (embeds - self.mean) * 1.0 / self.std
return embeds
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = (embeds * self.std) + self.mean
return embeds
| 705 |
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import requests
lowercase_ : Tuple = "YOUR API KEY"
def A__( __lowerCAmelCase , __lowerCAmelCase = giphy_api_key ):
_snake_case : Dict = '+'.join(query.split() )
_snake_case : str = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
_snake_case : str = requests.get(__lowerCAmelCase ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True ):
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
_snake_case : Tuple = timm.create_model('levit_128s' , pretrained=__A )
else:
_snake_case : int = timm.create_model('levit_128' , pretrained=__A )
if hidden_sizes == 1_92:
_snake_case : List[Any] = timm.create_model('levit_192' , pretrained=__A )
if hidden_sizes == 2_56:
_snake_case : str = timm.create_model('levit_256' , pretrained=__A )
if hidden_sizes == 3_84:
_snake_case : Optional[int] = timm.create_model('levit_384' , pretrained=__A )
from_model.eval()
_snake_case : List[str] = LevitForImageClassificationWithTeacher(__A ).eval()
_snake_case : Optional[Any] = OrderedDict()
_snake_case : Any = from_model.state_dict()
_snake_case : Union[str, Any] = list(from_model.state_dict().keys() )
_snake_case : List[str] = list(our_model.state_dict().keys() )
print(len(__A ) , len(__A ) )
for i in range(len(__A ) ):
_snake_case : int = weights[og_keys[i]]
our_model.load_state_dict(__A )
_snake_case : Dict = torch.randn((2, 3, 2_24, 2_24) )
_snake_case : Optional[int] = from_model(__A )
_snake_case : Union[str, Any] = our_model(__A ).logits
assert torch.allclose(__A , __A ), "The model logits don't match the original one."
_snake_case : List[Any] = name
print(__A )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
_snake_case : Tuple = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def A__( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True ):
_snake_case : Tuple = '''imagenet-1k-id2label.json'''
_snake_case : List[Any] = 10_00
_snake_case : List[str] = (1, num_labels)
_snake_case : int = '''huggingface/label-files'''
_snake_case : Any = num_labels
_snake_case : Optional[int] = json.load(open(hf_hub_download(__A , __A , repo_type='dataset' ) , 'r' ) )
_snake_case : Union[str, Any] = {int(__A ): v for k, v in idalabel.items()}
_snake_case : int = idalabel
_snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
_snake_case : Optional[Any] = partial(__A , num_labels=__A , idalabel=__A , labelaid=__A )
_snake_case : Dict = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
_snake_case : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __A , names_to_config[model_name] , __A , __A )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __A , __A , __A , __A )
return config, expected_shape
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowercase_ : Dict = parser.parse_args()
lowercase_ : List[str] = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( path , tmp_path ):
    """inspect_dataset must copy the dataset script into tmp_path, without bytecode caches."""
    inspect_dataset(path , tmp_path )
    # The copied script is named after the dataset (e.g. "paws.py").
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    # No __pycache__ directory should leak into the inspected copy.
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( path , tmp_path ):
    """inspect_metric must copy the metric script into tmp_path, without bytecode caches."""
    inspect_metric(path , tmp_path )
    # The copied script is named after the metric (e.g. "accuracy.py").
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def A__( path , config_name , expected_splits ):
    """get_dataset_config_info must report the requested config and its split names."""
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def A__( path , config_name , expected_exception ):
    """get_dataset_config_info must raise when no config name is given for a multi-config dataset."""
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def A__( path , expected ):
    """get_dataset_config_names must include the expected config for each dataset path."""
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def A__( path , expected_configs , expected_splits_in_first_config ):
    """get_dataset_infos must list all configs and the splits of the first config."""
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def A__( path , expected_config , expected_splits ):
    """get_dataset_infos must expose the expected config with the expected splits.

    Fix: duplicate ``__lowerCAmelCase`` parameters (SyntaxError) restored to the
    names declared in the parametrize string.
    """
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def A__( path , config_name , expected_exception ):
    """get_dataset_split_names must raise when no config name is given for a multi-config dataset.

    Fix: duplicate ``__lowerCAmelCase`` parameters (SyntaxError) restored to the
    names declared in the parametrize string.
    """
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def A__( __lowerCAmelCase ):
    """Evaluate a postfix (reverse Polish) expression given as a list of token strings.

    Returns 0 for an empty expression. Division is integer division truncating
    toward zero (C-style), built on top of Python's floor division.

    Fix: the double pop was corrupted (``_snake_case , _snake_case = ...``) so the
    operands ``a``/``b`` used below were never bound. The top of the stack is the
    RIGHT operand, so it is popped into ``b`` first.
    """
    if not __lowerCAmelCase:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in __lowerCAmelCase:
        if token in operations:
            # top of stack is the right-hand operand
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    # floor division rounds toward -inf; +1 truncates toward zero
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` state dict.

    Fix: the three parameters were all named ``__lowerCAmelCase`` (a SyntaxError).
    The names are restored from the command-line call at the bottom of the file.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param bert_config_file: JSON config describing the model architecture
    :param pytorch_dump_path: where to ``torch.save`` the converted state dict
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # Fix: `parser` and `args` were never bound (both assignments went to the
    # mangled name `lowercase_`), and the final call referenced
    # `convert_tf_checkpoint_to_pytorch`, which does not exist in this file —
    # the conversion function above is (mis)named `A__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    A__(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 0 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def A__( __lowerCAmelCase ):
    """Reorder a 32-character bit string (bytes) into little-endian byte order.

    The four 8-bit groups are emitted last-first. Raises ValueError for any
    other length.

    Fix: the body referenced the unbound names ``string_aa``/``little_endian``;
    the accumulator is now bound and the parameter is used consistently.
    """
    if len(__lowerCAmelCase ) != 32:
        raise ValueError('Input must be of length 32' )

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += __lowerCAmelCase[8 * i : 8 * i + 8]
    return little_endian
def A__( __lowerCAmelCase ):
    """Format a non-negative int as its little-endian 8-character hex bytestring.

    e.g. 1234 -> b'd2040000'. Raises ValueError for negative input.

    Fix: the body referenced the unbound names ``i``/``hex_rep``/
    ``little_endian_hex``; bindings restored.
    """
    if __lowerCAmelCase < 0:
        raise ValueError('Input must be non-negative' )

    hex_rep = format(__lowerCAmelCase , '08x' )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def A__( __lowerCAmelCase ):
    """MD5 preprocessing: expand *message* (bytes) into a padded ASCII bit string.

    Output = message bits + b'1' + b'0' padding to 448 (mod 512) + the 64-bit
    message length (in bits) appended as two little-endian 32-bit halves.

    Fix: the original body left ``bit_string``/``start_len`` unbound and called
    a sibling ``to_little_endian`` that does not exist under that name in this
    file; a local helper makes the function self-contained.
    """
    def _little_endian(bits_32):
        # reverse the four 8-bit groups of a 32-char bit string
        out = b""
        for i in [3, 2, 1, 0]:
            out += bits_32[8 * i : 8 * i + 8]
        return out

    bit_string = b""
    for char in __lowerCAmelCase:
        bit_string += format(char , '08b' ).encode('utf-8' )
    # message length in bits, captured BEFORE padding
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += _little_endian(start_len[32:] ) + _little_endian(start_len[:32] )

    return bit_string
def A__( __lowerCAmelCase ):
    """Yield each 512-bit block of *bit_string* as a list of sixteen 32-bit words.

    Words are decoded little-endian. Raises ValueError (on first iteration,
    since this is a generator) if the length is not a multiple of 512.

    Fix: the original body left ``block``/``block_words`` unbound and called a
    sibling ``to_little_endian`` that does not exist under that name in this
    file; a local helper makes the generator self-contained.
    """
    def _little_endian(bits_32):
        # reverse the four 8-bit groups of a 32-char bit string
        out = b""
        for i in [3, 2, 1, 0]:
            out += bits_32[8 * i : 8 * i + 8]
        return out

    if len(__lowerCAmelCase ) % 5_12 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )

    for pos in range(0 , len(__lowerCAmelCase ) , 5_12 ):
        block = __lowerCAmelCase[pos : pos + 5_12]
        yield [int(_little_endian(block[i : i + 32] ) , 2 ) for i in range(0 , 5_12 , 32 )]
def A__( __lowerCAmelCase ):
    """Return the bitwise NOT of a non-negative int within 32 bits.

    Raises ValueError for negative input.

    Fix: the body referenced the unbound names ``i``/``i_str``/``new_str``;
    bindings restored.
    """
    if __lowerCAmelCase < 0:
        raise ValueError('Input must be non-negative' )

    i_str = format(__lowerCAmelCase , '032b' )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def A__( a , b ):
    """Add two ints modulo 2**32 (MD5 word addition).

    Fix: both parameters were named ``__lowerCAmelCase`` (a SyntaxError) and the
    body referenced ``a``/``b``; the parameters are renamed to match.
    """
    return (a + b) % 2**32
def A__( i , shift ):
    """Left-rotate a 32-bit int *i* by *shift* bits.

    Raises ValueError if either argument is negative.

    Fix: both parameters were named ``__lowerCAmelCase`` (a SyntaxError) and the
    body referenced ``i``/``shift``; the parameters are renamed to match.
    """
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    # ^ is equivalent to | here because the shifted halves never overlap
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def A__( __lowerCAmelCase ):
    """Return the MD5 digest of *message* (bytes) as a 32-character hex bytestring.

    Implements RFC 1321 directly: preprocessing/padding, 64 rounds per 512-bit
    block over the running state (a0, b0, c0, d0), then a little-endian hex dump
    of the final state.

    Fix: every state variable in the original body was mangled to the literal
    name ``_snake_case`` (so ``aa``/``ba``/... and the helpers it called were
    unbound). The helpers are reproduced locally so this function is
    self-contained and its output can be verified against known MD5 digests.
    """
    def _little_endian(bits_32):
        # reverse the four 8-bit groups of a 32-char bit string
        out = b""
        for i in [3, 2, 1, 0]:
            out += bits_32[8 * i : 8 * i + 8]
        return out

    def _reformat_hex(value):
        # 32-bit int -> little-endian 8-char hex bytestring
        hex_rep = format(value , '08x' )[-8:]
        out = b""
        for i in [3, 2, 1, 0]:
            out += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
        return out

    def _preprocess(message):
        # message bits + '1' + zero padding to 448 (mod 512) + 64-bit bit-length
        bit_string = b""
        for char in message:
            bit_string += format(char , '08b' ).encode('utf-8' )
        start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
        bit_string += b"1"
        while len(bit_string ) % 5_12 != 4_48:
            bit_string += b"0"
        bit_string += _little_endian(start_len[32:] ) + _little_endian(start_len[:32] )
        return bit_string

    def _block_words(bit_string):
        # yield each 512-bit block as sixteen little-endian 32-bit words
        for pos in range(0 , len(bit_string ) , 5_12 ):
            block = bit_string[pos : pos + 5_12]
            yield [int(_little_endian(block[i : i + 32] ) , 2 ) for i in range(0 , 5_12 , 32 )]

    def _not_32(value):
        # bitwise NOT within 32 bits
        return 2**32 - 1 - value

    def _sum_32(a, b):
        return (a + b) % 2**32

    def _left_rotate_32(value, shift):
        return ((value << shift) ^ (value >> (32 - shift))) % 2**32

    bit_string = _preprocess(__lowerCAmelCase )
    # K[i] = floor(2^32 * |sin(i + 1)|), per RFC 1321
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]

    # Starting states (RFC 1321 word A, B, C, D)
    a0 = 0X6745_2301
    b0 = 0Xefcd_ab89
    c0 = 0X98ba_dcfe
    d0 = 0X1032_5476

    # per-round rotate amounts: four rounds of four repeated quartets
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process bit string in chunks, each with 16 32-char words
    for block_words in _block_words(bit_string ):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | _not_32(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = _sum_32(b , _left_rotate_32(f , shift_amounts[i] ) )

        # Add hashed chunk to running total
        a0 = _sum_32(a0 , a )
        b0 = _sum_32(b0 , b )
        c0 = _sum_32(c0 , c )
        d0 = _sum_32(d0 , d )

    digest = _reformat_hex(a0 ) + _reformat_hex(b0 ) + _reformat_hex(c0 ) + _reformat_hex(d0 )
    return digest
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 710 |
import itertools
import math
def A__( __lowerCAmelCase ):
    """Return True iff *number* is prime.

    Handles 2 and 3 directly, rejects numbers < 2 and multiples of 2 or 3, then
    trial-divides by candidates of the form 6k ± 1 up to sqrt(number).
    """
    if 1 < __lowerCAmelCase < 4:
        # 2 and 3 are primes
        return True
    if __lowerCAmelCase < 2 or __lowerCAmelCase % 2 == 0 or __lowerCAmelCase % 3 == 0:
        # negatives, 0, 1, even numbers and multiples of 3 are not prime
        return False

    # every remaining prime has the form 6k +/- 1
    limit = int(math.sqrt(__lowerCAmelCase ) + 1 )
    return not any(
        __lowerCAmelCase % divisor == 0 or __lowerCAmelCase % (divisor + 2) == 0
        for divisor in range(5 , limit , 6 )
    )
def A__( ):
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely.

    Fix: the counter was bound to the mangled name ``_snake_case`` while the
    body used ``num``, and the primality test called a sibling ``is_prime``
    that does not exist under that name in this file; a local helper makes the
    generator self-contained.
    """
    def _is_prime(number):
        # trial division over candidates of the form 6k +/- 1
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num ):
            yield num
        num += 1
def A__( __lowerCAmelCase = 1_00_01 ):
    """Return the *nth* prime number (Project Euler problem 7; default: 10001st).

    Fix: the original referenced the unbound name ``nth`` and called a sibling
    ``prime_generator`` that does not exist under that name in this file; the
    search is now self-contained.

    Raises ValueError if *nth* < 1 (mirrors the ValueError the original's
    negative islice start would have raised).
    """
    if __lowerCAmelCase < 1:
        raise ValueError('nth must be >= 1' )

    def _is_prime(number):
        # trial division over candidates of the form 6k +/- 1
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    count = 0
    candidate = 1
    while count < __lowerCAmelCase:
        candidate += 1
        if _is_prime(candidate ):
            count += 1
    return candidate
if __name__ == "__main__":
    # Fix: the original printed f"{solution() = }" but no function named
    # `solution` exists in this file — the solver above is (mis)named `A__`.
    print(f'''solution() = {A__()}''')
| 652 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A__( __lowerCAmelCase ):
    """Map one GroupViT checkpoint key to its Hugging Face GroupViTModel name.

    Applies a sequence of substring replacements covering the vision encoder,
    text encoder and the two projection heads.

    Fix: the original body tested/returned ``name`` but never bound it from the
    parameter, and every replacement result was discarded into the mangled name
    ``_snake_case``; the chain now rebinds ``name`` at each step.
    """
    name = __lowerCAmelCase
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def A__( orig_state_dict , config ):
    """Remap a GroupViT checkpoint state dict in place to the HF GroupViTModel layout.

    Fused qkv / in_proj attention weights are split into separate q/k/v
    projections; all other keys go through ``rename_key`` and projection-head
    tensors are squeezed.

    Fix: both parameters were named ``__lowerCAmelCase`` (a SyntaxError), the
    body referenced the unbound name ``_lowerCAmelCase``, and the destination
    dict keys had been lost to the mangled name ``_snake_case``. The key layout
    is reconstructed from the HF GroupViT conversion script — TODO confirm
    against upstream.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'''
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'''] = val[:dim, :]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'''] = val[:dim]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'''] = val[-dim:]
        else:
            # NOTE(review): in this corrupted file the sibling helper above is
            # (mis)named `A__`; `rename_key` is its intended name.
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def A__( ):
    """Download and return the standard COCO cats test image (PIL Image).

    Fix: the original passed the unbound name ``_lowerCAmelCase`` as the URL
    and as ``stream=``; streaming must be enabled so ``.raw`` is readable.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def A__( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """Convert a GroupViT checkpoint into a saved HF GroupViTModel + CLIPProcessor.

    Loads the raw checkpoint, remaps its state dict, verifies the model's
    logits on a reference image against known expected values, then saves (and
    optionally pushes) the model and processor.

    Fix: all four parameters were named ``__lowerCAmelCase`` (a SyntaxError)
    and every local was mangled to ``_snake_case``/``_lowerCAmelCase``; names
    are restored from the argparse call at the bottom of the file.

    NOTE(review): `convert_state_dict` and `prepare_img` are the intended names
    of the sibling helpers above, which are (mis)named `A__` in this file.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()

    state_dict = torch.load(checkpoint_path , map_location='cpu' )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )

    with torch.no_grad():
        outputs = model(**inputs )

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.3_629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.6_230]] )
    else:
        raise ValueError(f'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )

    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )

    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    # Fixes: `parser` and `args` were never bound (both assignments went to the
    # mangled name `lowercase_`); the final call referenced
    # `convert_groupvit_checkpoint`, which does not exist in this file — the
    # conversion function above is (mis)named `A__`; and the --model_name
    # default "groupvit-gccy-fcc" was a typo that the conversion function
    # would reject ("Model name ... not supported").
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
    parser.add_argument(
        '--model_name',
        default='groupvit-gcc-yfcc',
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
    )
    args = parser.parse_args()
    A__(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Model identifiers and fixture data used throughout this test module.
# Fix: all eight constants had been renamed to the same placeholder
# `lowercase_` (each assignment clobbering the previous one), while the test
# class below references them by the names restored here.
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def A__( path , articles ):
    """Write *articles* to *path*, joined by newlines.

    Fix: both parameters were named ``__lowerCAmelCase`` (a SyntaxError), and
    the ``.open('w')`` handle was never explicitly closed; ``write_text``
    opens, writes and closes in one call with the same on-disk result.
    """
    content = '\n'.join(articles )
    Path(path ).write_text(content )
def A__( __lowerCAmelCase ):
    """Populate *tmp_dir* with {train,val,test}.{source,target} fixture files.

    Sources are filled with ARTICLES and targets with SUMMARIES; returns the
    directory so it can be passed straight to a dataset constructor.

    Fix: the original called ``_dump_articles``, which does not exist under
    that name in this file (the writer above is misnamed ``A__``, which here
    would recurse); the one-line write is inlined instead.
    """
    def _dump(path, lines):
        # mirrors the module-level article writer: newline-joined lines
        Path(path ).write_text('\n'.join(lines ) )

    for split in ["train", "val", "test"]:
        _dump(os.path.join(__lowerCAmelCase , f'''{split}.source''' ) , ARTICLES )
        _dump(os.path.join(__lowerCAmelCase , f'''{split}.target''' ) , SUMMARIES )
    return __lowerCAmelCase
class lowercase ( a_ ):
    """Tests for the seq2seq example datasets (SeqaSeqDataset / LegacySeqaSeqDataset).

    NOTE(review): throughout this class the left-hand sides of many assignments
    were mangled to the placeholder name ``_snake_case`` by an automated rename,
    while later statements still reference the originally intended names
    (``tokenizer``, ``train_dataset``, ``dataloader``, ``max_src_len``, ...),
    and several call-site arguments were collapsed to ``lowerCamelCase_``.
    The code is kept byte-identical here; restoring the bindings requires the
    upstream file. Comments below describe the apparent intent only.
    """

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
        '''Check SeqaSeqDataset truncates sources/targets to the configured max lengths.'''
        _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Dict = 4
        _snake_case : Any = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        _snake_case : int = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
        _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
        '''Check LegacySeqaSeqDataset trims sources and truncates targets.'''
        _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
        _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
        _snake_case : Union[str, Any] = 4
        _snake_case : Optional[int] = LegacySeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
        _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def __UpperCAmelCase ( self : Dict ):
        '''pack_data_dir should merge short examples into fewer, longer ones.'''
        _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
        _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
        _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
        _snake_case : Dict = {x.name for x in save_dir.iterdir()}
        _snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1
        assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def __UpperCAmelCase ( self : List[str] ):
        '''Dynamic batch sampler should vary batch sizes and respect the token budget.'''
        if not FAIRSEQ_AVAILABLE:
            return
        _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
        _snake_case : List[str] = 64
        _snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
        _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
        assert len(set(lowerCamelCase_ ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ )  # no dropped or added examples
        _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : List[Any] = []
        _snake_case : List[Any] = []
        for batch in data_loader:
            _snake_case : Any = batch['input_ids'].shape
            _snake_case : str = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _snake_case : int = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(lowerCamelCase_ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(lowerCamelCase_ )
        assert num_src_per_batch[0] == max(lowerCamelCase_ )
        if failures:
            raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )

    def __UpperCAmelCase ( self : Optional[int] ):
        '''Sortish sampler should reduce padding compared to the default order.'''
        _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
        _snake_case : Optional[Any] = 2
        _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
        _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
        _snake_case : Tuple = tokenizer.pad_token_id

        def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
            # count pad tokens per batch in the given tensor field
            return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
        assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
        assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )

    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
        '''Helper: build (dataset, max_tokens, tokenizer) from real or test data.'''
        if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
            _snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
            _snake_case : List[Any] = max_len * 2 * 64
            if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
                save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        else:
            _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
            _snake_case : List[Any] = max_len * 4
            save_len_file(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
        _snake_case : str = SeqaSeqDataset(
            lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
        return ds, max_tokens, tokenizer

    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''Distributed sortish samplers for different ranks must not overlap.'''
        _snake_case , _snake_case , _snake_case : Any = self._get_dataset()
        _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
        _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
        assert idsa.intersection(lowerCamelCase_ ) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
        '''dataset_kwargs should carry language codes (mbart) or prefix-space flags (bart).'''
        _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
        if tok_name == MBART_TINY:
            _snake_case : int = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            _snake_case : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            _snake_case : Tuple = SeqaSeqDataset(
                lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            _snake_case : List[Any] = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.