from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self) -> int:
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch carries its row and column index alongside the
        # patch_height * patch_width * num_channels pixel values, hence the "+ 2".
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Without header text, a VQA image processor is expected to raise
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4-channel inputs are converted to RGB, so one channel is dropped
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # formula for sum of an arithmetic series: S_n = n/2 * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
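# Added note (standard algebra, not from the original file): the closed form used
# above follows from summing the progression term by term,
#     S_n = sum_{k=0}^{n-1} (a + k*d) = n*a + d*n*(n - 1)/2 = (n/2) * (2*a + (n - 1)*d),
# with first term a and common difference d; e.g. (10/2) * (2*1 + 9*1) = 55.0,
# matching the doctest.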
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    # PYTEST_XDIST_WORKER looks like "gw2" when running under pytest-xdist; strip the "gw" prefix.
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
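# Minimal usage sketch (illustrative only, not part of the original module):
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         requests.get("https://huggingface.co")  # raises requests.ConnectionError
#
# With CONNECTION_TIMES_OUT, calls must pass an explicit `timeout=`, otherwise
# `timeout_request` raises RequestWouldHangIndefinitelyError instead of hanging.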
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
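# Example invocation (script name and paths are placeholders, not from the original):
#
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path /path/to/tf_checkpoint \
#         --pytorch_dump_folder_path /path/to/output_dir
#
# Leaving --gpt2_config_file unset falls back to the default GPT2Config, as handled above.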
def partition(m: int) -> int:
    # Standard partition-number DP: memo[n][k] counts the partitions of n
    # using parts of size at most k + 1.
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
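# Quick sanity check (added for reference; these are the standard partition numbers):
# partition(1) == 1, partition(5) == 7, partition(7) == 15, partition(10) == 42.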
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to order the vertices of the graph by finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search on the reversed graph to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components of the graph (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
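# Illustrative usage (added; not part of the original module). Running Kosaraju's
# two-pass DFS on the sample graphs defined at the top of this file should give:
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]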
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#    SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
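# Illustration (hypothetical mapping entries, added for clarity): given a block such as
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", "BertModel"),
#             ("albert", "AlbertModel"),
#         ]
#     )
#
# the script reorders the entries by their quoted identifier, so "albert" ends up
# before "bert".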
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE )
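        # Assumed shape semantics: positions stack one frame per structure-module block (8)
        # in atom14 format (14 atoms x 3 coords); angles hold 7 torsions per residue as (sin, cos).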
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
        (
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
        ) = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : List[str] = False
__lowercase : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
__lowercase : Union[str, Any] = ()
__lowercase : List[str] = {} if is_torch_available() else {}
__lowercase : List[Any] = False
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = EsmFoldModelTester(self )
__snake_case = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold only has one output format.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
class lowerCAmelCase ( __lowerCAmelCase):
@slow
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
__snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )['''positions''']
__snake_case = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*_lowerCamelCase : str , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=2 )-> str:
'''simple docstring'''
from .. import __version__
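    # Gather the current values of any deprecated attributes so they can still be returned to the caller.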
__snake_case = take_from
__snake_case = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__snake_case = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case = call_frame.filename
__snake_case = call_frame.lineno
__snake_case = call_frame.function
__snake_case , __snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
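# Usage sketch (hypothetical call site): in the unmangled diffusers source this helper is
# `deprecate(*args, take_from=None, standard_warn=True, stacklevel=2)`, e.g.
#   scale = deprecate(('scale', '1.0.0', 'Use `scaling` instead.'), take_from=kwargs)
# which emits the deprecation warning and pops the old kwarg, returning its value.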
| 24 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size if size is not None else {'''height''': 18, '''width''': 20}
__snake_case = do_thumbnail
__snake_case = do_align_axis
__snake_case = do_pad
__snake_case = do_normalize
__snake_case = image_mean
__snake_case = image_std
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = DonutImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@is_flaky()
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
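    # Sanity check: the HF image processor must match the original torchvision preprocessing.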
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7.''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
'''simple docstring'''
import os
def _UpperCamelCase ()-> str:
'''simple docstring'''
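    # Return the first ten digits of the sum of the numbers listed in ``num.txt``.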
    __snake_case = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(__snake_case ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ : Union[str, Any] = logging.getLogger(__name__)
UpperCAmelCase_ : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
UpperCAmelCase_ : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCAmelCase)} , )
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCAmelCase :
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input training data file (a text file).'''})
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
                '''The input training data files (multiple files in glob format). '''
                '''Very often, splitting large files into smaller files can prevent the tokenizer from going out of memory'''
)
} , )
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
__lowercase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
__lowercase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''})
    __lowercase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to use whole word mask.'''})
__lowercase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''})
__lowercase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
__lowercase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''})
__lowercase : int = field(
default=-1 , metadata={
'''help''': (
                '''Optional input sequence length after tokenization. '''
                '''The training dataset will be truncated in blocks of this size for training. '''
                '''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
__lowercase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
def _UpperCamelCase (_lowerCamelCase : DataTrainingArguments , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[str] = None , )-> int:
'''simple docstring'''
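    # Helper: pick a line-by-line dataset (optionally with whole-word-mask reference files)
    # or a contiguous-block TextDataset.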
def _dataset(_lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=_lowerCamelCase , file_path=_lowerCamelCase , block_size=args.block_size , ref_path=_lowerCamelCase , )
return LineByLineTextDataset(tokenizer=_lowerCamelCase , file_path=_lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_lowerCamelCase , file_path=_lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__snake_case = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__snake_case = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__snake_case = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__snake_case = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__snake_case = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__snake_case = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__snake_case = AutoModelWithLMHead.from_config(_lowerCamelCase )
model.resize_token_embeddings(len(_lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__snake_case = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__snake_case = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__snake_case = (
get_dataset(_lowerCamelCase , tokenizer=_lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__snake_case = (
get_dataset(_lowerCamelCase , tokenizer=_lowerCamelCase , evaluate=_lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__snake_case = DataCollatorForPermutationLanguageModeling(
tokenizer=_lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__snake_case = DataCollatorForWholeWordMask(
tokenizer=_lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__snake_case = DataCollatorForLanguageModeling(
tokenizer=_lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__snake_case = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , data_collator=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , prediction_loss_only=_lowerCamelCase , )
# Training
if training_args.do_train:
__snake_case = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate()
__snake_case = math.exp(eval_output['''eval_loss'''] )
__snake_case = {'''perplexity''': perplexity}
__snake_case = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _lowerCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_lowerCamelCase )
return results
def _UpperCamelCase (_lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
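        # In streaming mode nothing is materialised; otherwise download/prepare the cache, then load from it.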
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase (_lowerCamelCase : list[int] , _lowerCamelCase : int )-> list[list[int]]:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = 0
__snake_case = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def _UpperCamelCase (_lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : list[list[int]] , _lowerCamelCase : int , )-> None:
'''simple docstring'''
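    # Prune this branch if the path already exceeds max_sum, or if even taking every
    # remaining number could not reach max_sum.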
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
UpperCAmelCase_ : Union[str, Any] = [3, 3_4, 4, 1_2, 5, 2]
UpperCAmelCase_ : str = 9
UpperCAmelCase_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
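        # LED follows the BART format: `<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair.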
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
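        # `global_attention_mask` is LED-specific, so the base `_pad` call above did not pad it.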
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 | 1 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 |
'''simple docstring'''
from collections import deque
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
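    # Tarjan invariants: index_of[v] is the DFS discovery order, lowlink_of[v] the smallest
    # index reachable from v; v roots a strongly connected component iff lowlink_of[v] == index_of[v].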
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Tuple = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
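# `_LazyModule` (instantiated at the bottom of this file) defers the real imports
# until an attribute is first accessed, so importing this package stays cheap.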
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # moussaKam/mbarthez is a French model, so we also use French texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : str )-> str:
'''simple docstring'''
return " ".join(
''''''.join(word[::-1] ) if len(_lowerCamelCase ) > 4 else word for word in sentence.split() )
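# Example: reverse_long_words("Hey wollef sroirraw") returns "Hey fellow warriors";
# only words longer than four characters are reversed.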
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
UpperCAmelCase_ : int = logging.get_logger(__name__)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
requires_backends(self , ['''bs4'''] )
super().__init__(**__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__snake_case = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE )
xpath_tags.append(child.name )
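            # subscript 0 means the tag is its parent's only child with that name;
            # otherwise record the 1-based position among same-named siblings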
xpath_subscripts.append(
0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) )
__snake_case = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' )
__snake_case = []
__snake_case = []
__snake_case = []
for element in html_code.descendants:
if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__snake_case = html.unescape(__SCREAMING_SNAKE_CASE ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = self.xpath_soup(__SCREAMING_SNAKE_CASE )
stringaxtag_seq.append(__SCREAMING_SNAKE_CASE )
stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = ''''''
for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self , __SCREAMING_SNAKE_CASE ) -> BatchFeature:
'''simple docstring'''
__snake_case = False
# Check that strings has a valid type
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = True
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = True
if not valid_strings:
raise ValueError(
                '''HTML strings must be of type `str` or `List[str]` (batch of examples), '''
                F'''but got {type(__SCREAMING_SNAKE_CASE )}.''' )
__snake_case = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) )
if not is_batched:
__snake_case = [html_strings]
# Get nodes + xpaths
__snake_case = []
__snake_case = []
for html_string in html_strings:
__snake_case , __snake_case , __snake_case = self.get_three_from_single(__SCREAMING_SNAKE_CASE )
nodes.append(__SCREAMING_SNAKE_CASE )
__snake_case = []
for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
xpath_strings.append(__SCREAMING_SNAKE_CASE )
xpaths.append(__SCREAMING_SNAKE_CASE )
# return as Dict
__snake_case = {'''nodes''': nodes, '''xpaths''': xpaths}
__snake_case = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
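        # the key bias is fixed at zero in the original vision encoder, so a zeros
        # tensor is spliced between the query and value biases to form the fused qkv bias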
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
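    # BLIP-2 treats the newline token as end-of-sequence, so its id is read off the tokenizer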
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
    help='''Name of the BLIP-2 model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _UpperCamelCase (_lowerCamelCase : List[Any] )-> str:
'''simple docstring'''
__snake_case = SwinConfig()
__snake_case = swin_name.split('''_''' )
__snake_case = name_split[1]
__snake_case = int(name_split[4] )
__snake_case = int(name_split[3][-1] )
if model_size == "tiny":
__snake_case = 96
__snake_case = (2, 2, 6, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "small":
__snake_case = 96
__snake_case = (2, 2, 18, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "base":
__snake_case = 1_28
__snake_case = (2, 2, 18, 2)
__snake_case = (4, 8, 16, 32)
else:
__snake_case = 1_92
__snake_case = (2, 2, 18, 2)
__snake_case = (6, 12, 24, 48)
if "in22k" in swin_name:
__snake_case = 2_18_41
else:
__snake_case = 10_00
__snake_case = '''huggingface/label-files'''
__snake_case = '''imagenet-1k-id2label.json'''
__snake_case = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__snake_case = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = img_size
__snake_case = num_classes
__snake_case = embed_dim
__snake_case = depths
__snake_case = num_heads
__snake_case = window_size
return config
def _UpperCamelCase (_lowerCamelCase : str )-> List[str]:
'''simple docstring'''
if "patch_embed.proj" in name:
__snake_case = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__snake_case = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
__snake_case = '''encoder.''' + name
if "attn.proj" in name:
__snake_case = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__snake_case = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__snake_case = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__snake_case = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__snake_case = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__snake_case = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
__snake_case = '''layernorm.weight'''
if name == "norm.bias":
__snake_case = '''layernorm.bias'''
if "head" in name:
__snake_case = name.replace('''head''' , '''classifier''' )
else:
__snake_case = '''swin.''' + name
return name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(_lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
__snake_case = key.split('''.''' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[3] )
__snake_case = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
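            # timm fuses query/key/value into a single qkv tensor: the first `dim` rows
            # are q, the middle `dim` rows are k, and the last `dim` rows are v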
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[
dim : dim * 2, :
]
__snake_case = val[-dim:, :]
else:
__snake_case = val[
:dim
]
__snake_case = val[
dim : dim * 2
]
__snake_case = val[
-dim:
]
else:
__snake_case = val
return orig_state_dict
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Any )-> List[Any]:
'''simple docstring'''
__snake_case = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
__snake_case = get_swin_config(_lowerCamelCase )
__snake_case = SwinForImageClassification(_lowerCamelCase )
model.eval()
__snake_case = convert_state_dict(timm_model.state_dict() , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
__snake_case = image_processor(images=_lowerCamelCase , return_tensors='''pt''' )
__snake_case = timm_model(inputs['''pixel_values'''] )
__snake_case = model(**_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
        # binarize the mask: values below 0.5 become 0, everything else becomes 1
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
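        # RePaint alternates denoising steps with resampling jumps back in time;
        # t_last tracks the previous timestep so each iteration knows which way to move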
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 24 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : str )-> Any:
'''simple docstring'''
__snake_case = RemBertConfig.from_json_file(_lowerCamelCase )
print('''Building PyTorch model from configuration: {}'''.format(str(_lowerCamelCase ) ) )
__snake_case = RemBertModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(_lowerCamelCase ) )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
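        # every candidate label is expanded into a full sentence hypothesis
        # (e.g. "This is a photo of cat.") before tokenization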
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -__SCREAMING_SNAKE_CASE[0] )
]
return result
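# A minimal usage sketch, assuming the stock `transformers.pipeline` entry point and a
# public CLIP checkpoint (both are assumptions, not part of this file):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])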
| 24 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase_ : Optional[Any] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : str )-> int:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
__snake_case = '''lm_head'''
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
else:
__snake_case = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : str )-> str:
'''simple docstring'''
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case = True
else:
for key, mapped_key in MAPPING.items():
__snake_case = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(_lowerCamelCase )[0].split('''.''' )[-2]
__snake_case = mapped_key.replace('''*''' , _lowerCamelCase )
if "weight_g" in name:
__snake_case = '''weight_g'''
elif "weight_v" in name:
__snake_case = '''weight_v'''
elif "bias" in name:
__snake_case = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case = '''weight'''
else:
__snake_case = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
__snake_case = full_name.split('''conv_layers.''' )[-1]
__snake_case = name.split('''.''' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=True )-> Optional[int]:
'''simple docstring'''
if config_path is not None:
__snake_case = UniSpeechConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case = UniSpeechConfig()
if is_finetuned:
if dict_path:
__snake_case = Dictionary.load_from_json(_lowerCamelCase )
            # important: change the bos & pad token ids, since the CTC blank symbol is
            # <pad> and not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
__snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case = 42
__snake_case = 43
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
__snake_case = WavaVecaPhonemeCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCamelCase , )
__snake_case = True if config.feat_extract_norm == '''layer''' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
__snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case = UniSpeechForCTC(_lowerCamelCase )
else:
__snake_case = UniSpeechForPreTraining(_lowerCamelCase )
if is_finetuned:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__snake_case = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_unispeech.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__snake_case = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
sd_pipe.set_scheduler('''sample_euler''' )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
sd_pipe.set_scheduler('''sample_euler''' )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__SCREAMING_SNAKE_CASE , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
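# A quick sanity check, not part of the original file: all three variants agree,
# and signs are ignored because each takes abs() first. (sum_of_digits,
# sum_of_digits_recursion and sum_of_digits_compact are the upstream names used
# by the benchmark below.)
#
#     sum_of_digits(9_045) == sum_of_digits_recursion(9_045) == sum_of_digits_compact(9_045) == 18
#     sum_of_digits(-123) == 6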
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 1 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCAmelCase_ : List[Any] = NewType('''DataClass''', Any)
UpperCAmelCase_ : Union[str, Any] = NewType('''DataClassType''', Any)
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> int:
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
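# For reference (string_to_bool is the upstream name of the helper above):
# string_to_bool("yes") -> True, string_to_bool("0") -> False, and a value that
# is already a bool is returned unchanged by the isinstance() short-circuit.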
def _UpperCamelCase (_lowerCamelCase : list )-> Callable[[str], Any]:
'''simple docstring'''
__snake_case = {str(_lowerCamelCase ): choice for choice in choices}
return lambda _lowerCamelCase : str_to_choice.get(_lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (*,
_lowerCamelCase : Union[str, List[str]] = None , _lowerCamelCase : str = None , _lowerCamelCase : Any = dataclasses.MISSING , _lowerCamelCase : Callable[[], Any] = dataclasses.MISSING , _lowerCamelCase : dict = None , **_lowerCamelCase : int , )-> dataclasses.Field:
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__snake_case = {}
if aliases is not None:
__snake_case = aliases
if help is not None:
__snake_case = help
return dataclasses.field(metadata=_lowerCamelCase , default=_lowerCamelCase , default_factory=_lowerCamelCase , **_lowerCamelCase )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Iterable[DataClassType]
def __init__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if "formatter_class" not in kwargs:
__snake_case = ArgumentDefaultsHelpFormatter
super().__init__(**__SCREAMING_SNAKE_CASE )
if dataclasses.is_dataclass(__SCREAMING_SNAKE_CASE ):
__snake_case = [dataclass_types]
__snake_case = list(__SCREAMING_SNAKE_CASE )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = F'''--{field.name}'''
__snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes; it is provided as a third-party extension mechanism.
if isinstance(field.type , __SCREAMING_SNAKE_CASE ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
__snake_case = kwargs.pop('''aliases''' , [] )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [aliases]
__snake_case = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__SCREAMING_SNAKE_CASE , '''UnionType''' ) and isinstance(__SCREAMING_SNAKE_CASE , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__SCREAMING_SNAKE_CASE ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F''' Problem encountered in field \'{field.name}\'.''' )
if type(__SCREAMING_SNAKE_CASE ) not in field.type.__args__:
# filter `str` in Union
__snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__snake_case = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__snake_case = (
field.type.__args__[0] if isinstance(__SCREAMING_SNAKE_CASE , field.type.__args__[1] ) else field.type.__args__[1]
)
__snake_case = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__snake_case = {}
if origin_type is Literal or (isinstance(field.type , __SCREAMING_SNAKE_CASE ) and issubclass(field.type , __SCREAMING_SNAKE_CASE )):
if origin_type is Literal:
__snake_case = field.type.__args__
else:
__snake_case = [x.value for x in field.type]
__snake_case = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__snake_case = field.default
else:
__snake_case = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__snake_case = copy(__SCREAMING_SNAKE_CASE )
# Hack because type=bool in argparse does not behave as we want.
__snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default for a field of type bool.
__snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
__snake_case = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__snake_case = True
elif isclass(__SCREAMING_SNAKE_CASE ) and issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = field.type.__args__[0]
__snake_case = '''+'''
if field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
__snake_case = True
else:
__snake_case = field.type
if field.default is not dataclasses.MISSING:
__snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
else:
__snake_case = True
parser.add_argument(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__snake_case = False
parser.add_argument(F'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , '''_argument_group_name''' ):
__snake_case = self.add_argument_group(dtype._argument_group_name )
else:
__snake_case = self
try:
__snake_case = get_type_hints(__SCREAMING_SNAKE_CASE )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
'''removing the line `from __future__ import annotations`, which opts in to Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__SCREAMING_SNAKE_CASE ):
__snake_case = '''.'''.join(map(__SCREAMING_SNAKE_CASE , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
'''the line `from __future__ import annotations`, which opts in to union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__SCREAMING_SNAKE_CASE ):
if not field.init:
continue
__snake_case = type_hints[field.name]
self._parse_dataclass_field(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> Tuple[DataClass, ...]:
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__snake_case = []
if args_filename:
args_files.append(Path(__SCREAMING_SNAKE_CASE ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__snake_case = ArgumentParser()
args_file_parser.add_argument(__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__snake_case , __snake_case = args_file_parser.parse_known_args(args=__SCREAMING_SNAKE_CASE )
__snake_case = vars(__SCREAMING_SNAKE_CASE ).get(args_file_flag.lstrip('''-''' ) , __SCREAMING_SNAKE_CASE )
if cmd_args_file_paths:
args_files.extend([Path(__SCREAMING_SNAKE_CASE ) for p in cmd_args_file_paths] )
__snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
__snake_case , __snake_case = self.parse_known_args(args=__SCREAMING_SNAKE_CASE )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(__SCREAMING_SNAKE_CASE ) if f.init}
__snake_case = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k in keys}
for k in keys:
delattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = dtype(**__SCREAMING_SNAKE_CASE )
outputs.append(__SCREAMING_SNAKE_CASE )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__SCREAMING_SNAKE_CASE )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Tuple[DataClass, ...]:
'''simple docstring'''
__snake_case = set(args.keys() )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(__SCREAMING_SNAKE_CASE ) if f.init}
__snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__snake_case = dtype(**__SCREAMING_SNAKE_CASE )
outputs.append(__SCREAMING_SNAKE_CASE )
if not allow_extra_keys and unused_keys:
raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(__SCREAMING_SNAKE_CASE )}''' )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Tuple[DataClass, ...]:
'''simple docstring'''
with open(Path(__SCREAMING_SNAKE_CASE ) , encoding='''utf-8''' ) as open_json_file:
__snake_case = json.loads(open_json_file.read() )
__snake_case = self.parse_dict(__SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Tuple[DataClass, ...]:
'''simple docstring'''
__snake_case = self.parse_dict(yaml.safe_load(Path(__SCREAMING_SNAKE_CASE ).read_text() ) , allow_extra_keys=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
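# A minimal usage sketch, assuming the upstream class name HfArgumentParser for
# the parser defined above; the dataclass itself is hypothetical:
#
#     @dataclasses.dataclass
#     class TrainingArgs:
#         learning_rate: float = 3e-4
#         debug: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#     (training_args,) = parser.parse_args_into_dataclasses()
#
# `--learning_rate` and `--debug` are generated from the dataclass fields; a
# `--no_debug` complement would only be added if the field defaulted to True.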
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: # "(" has no priority entry, so guard against it
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
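# Worked example of the intended behaviour (infix_2_postfix / infix_2_prefix are
# the upstream names of the two functions above; both also print a step table):
#
#     infix_2_postfix("a+b*(c^d-e)") -> "abcd^e-*+"
#     infix_2_prefix("a+b*(c^d-e)")  -> "+a*b-^cde"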
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ : int = datasets.utils.logging.get_logger(__name__)
class lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig):
__lowercase : bool = None
__lowercase : bool = None
class lowerCAmelCase ( folder_based_builder.FolderBasedBuilder):
__lowercase : List[Any] = datasets.Audio()
__lowercase : Optional[Any] = '''audio'''
__lowercase : List[str] = AudioFolderConfig
__lowercase : List[str] # definition at the bottom of the script
__lowercase : str = AudioClassification(audio_column='''audio''' , label_column='''label''')
UpperCAmelCase_ : Dict = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
UpperCAmelCase_ : Dict = AUDIO_EXTENSIONS
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
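# With the defaults above (embed_dim=96, depths=[2, 2, 6, 2]) the derived
# hidden_size is 96 * 2 ** (4 - 1) = 768, i.e. the channel dimension after the
# last Swin stage.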
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
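# e.g. format_time(3661) -> "1:01:01" and format_time(125) -> "02:05"
# (format_time is the upstream name of the helper above).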
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
'''simple docstring'''
from random import randint, random
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : int = 5 , )-> list:
'''simple docstring'''
__snake_case = [[-1] * number_of_cells] # Create a highway without any car
__snake_case = 0
__snake_case = max(_lowerCamelCase , 0 )
while i < number_of_cells:
__snake_case = (
randint(0 , _lowerCamelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _UpperCamelCase (_lowerCamelCase : list , _lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = 0
__snake_case = highway_now[car_index + 1 :]
for cell in range(len(_lowerCamelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_lowerCamelCase , -1 )
def _UpperCamelCase (_lowerCamelCase : list , _lowerCamelCase : float , _lowerCamelCase : int )-> list:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
# Before calculations, the highway is empty
__snake_case = [-1] * number_of_cells
for car_index in range(_lowerCamelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__snake_case = min(highway_now[car_index] + 1 , _lowerCamelCase )
# Number of empty cell before the next car
__snake_case = get_distance(_lowerCamelCase , _lowerCamelCase ) - 1
# We can't have the car causing an accident
__snake_case = min(next_highway[car_index] , _lowerCamelCase )
if random() < probability:
# Randomly, a driver will slow down
__snake_case = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _UpperCamelCase (_lowerCamelCase : list , _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : int )-> list:
'''simple docstring'''
__snake_case = len(highway[0] )
for i in range(_lowerCamelCase ):
__snake_case = update(highway[i] , _lowerCamelCase , _lowerCamelCase )
__snake_case = [-1] * number_of_cells
for car_index in range(_lowerCamelCase ):
__snake_case = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__snake_case = (car_index + speed) % number_of_cells
# Commit the change of position
__snake_case = speed
highway.append(_lowerCamelCase )
return highway
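# A minimal usage sketch under the upstream names construct_highway / simulate
# (assumed, not defined in this copy) for this Nagel-Schreckenberg model:
#
#     initial = construct_highway(number_of_cells=21, frequency=3, initial_speed=0)
#     history = simulate(initial, number_of_update=10, probability=0.3, max_speed=5)
#     # history holds one row of cell speeds per update step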
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
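# Worked example (least_divisible_repunit is the upstream name of the helper
# above): least_divisible_repunit(7) == 6 because 111111 = 7 * 15873, and
# least_divisible_repunit(41) == 5 because 11111 = 41 * 271.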
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Any = GPTaTokenizer
__lowercase : List[str] = GPTaTokenizerFast
__lowercase : Optional[int] = True
__lowercase : Any = {'''add_prefix_space''': True}
__lowercase : List[Any] = False
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__snake_case = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = '''lower newer'''
__snake_case = '''lower newer'''
return input_text, output_text
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = '''lower newer'''
__snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = '''lower newer'''
# Testing tokenization
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
__snake_case = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing the unknown token
__snake_case = tokens + [rust_tokenizer.unk_token]
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# Simple input
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
__snake_case = ('''This is a simple input''', '''This is a pair''')
__snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' , )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' , )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
__snake_case = ('''This is a simple input''', '''This is a pair''')
__snake_case = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__snake_case = tokenizer.pad_token_id
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
__snake_case = tokenizer(*__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = '''$$$'''
__snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE )
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
__snake_case = tokenizer.bos_token_id
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__snake_case = tokenizer.decode(out_s.input_ids )
__snake_case = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = [self.get_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__snake_case = '''Encode this.'''
__snake_case = '''This one too please.'''
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode_plus(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , )
__snake_case = encoded_sequence_dict['''input_ids''']
__snake_case = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
__snake_case = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__SCREAMING_SNAKE_CASE )
]
__snake_case = [x for x in filtered_sequence if x is not None]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__SCREAMING_SNAKE_CASE )
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
self.assertEqual(__SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''test_opt''' )
__snake_case = AutoTokenizer.from_pretrained('''./test_opt''' )
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
self.assertEqual(__SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__SCREAMING_SNAKE_CASE )
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(__SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__SCREAMING_SNAKE_CASE )
__snake_case = '''bos'''
__snake_case = tokenizer.get_vocab()['''bos''']
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(__SCREAMING_SNAKE_CASE , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''./tok''' )
__snake_case = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
self.assertEqual(__SCREAMING_SNAKE_CASE , [3_1957, 250, 1345, 9, 10, 4758] )
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Optional[Any] = ''''''
UpperCAmelCase_ : List[Any] = ''''''
UpperCAmelCase_ : Any = 1 # (0 is vertical, 1 is horizontal)
def _UpperCamelCase ()-> None:
'''simple docstring'''
__snake_case , __snake_case = get_dataset(_lowerCamelCase , _lowerCamelCase )
print('''Processing...''' )
__snake_case , __snake_case , __snake_case = update_image_and_anno(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for index, image in enumerate(_lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__snake_case = random_chars(32 )
__snake_case = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__snake_case = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(f'''/{file_root}.jpg''' , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(_lowerCamelCase )} with {file_name}''' )
__snake_case = []
for anno in new_annos[index]:
__snake_case = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(_lowerCamelCase )
with open(f'''/{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : str )-> tuple[list, list]:
'''simple docstring'''
__snake_case = []
__snake_case = []
for label_file in glob.glob(os.path.join(_lowerCamelCase , '''*.txt''' ) ):
__snake_case = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_lowerCamelCase ) as in_file:
__snake_case = in_file.readlines()
__snake_case = os.path.join(_lowerCamelCase , f'''{label_name}.jpg''' )
__snake_case = []
for obj_list in obj_lists:
__snake_case = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def _UpperCamelCase (_lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : int = 1 )-> tuple[list, list, list]:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = []
for idx in range(len(_lowerCamelCase ) ):
__snake_case = []
__snake_case = img_list[idx]
path_list.append(_lowerCamelCase )
__snake_case = anno_list[idx]
__snake_case = cva.imread(_lowerCamelCase )
if flip_type == 1:
__snake_case = cva.flip(_lowerCamelCase , _lowerCamelCase )
for bbox in img_annos:
__snake_case = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__snake_case = cva.flip(_lowerCamelCase , _lowerCamelCase )
for bbox in img_annos:
__snake_case = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowerCamelCase )
new_imgs_list.append(_lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
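# For reference: annotations are YOLO-style (class, x_center, y_center, w, h)
# with coordinates normalised to [0, 1], so a horizontal flip maps
# x_center -> 1 - x_center and a vertical flip maps y_center -> 1 - y_center,
# e.g. (0, 0.3, 0.5, 0.2, 0.4) -> (0, 0.7, 0.5, 0.2, 0.4) for flip_type == 1.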
def _UpperCamelCase (_lowerCamelCase : int = 32 )-> str:
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
__snake_case = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
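# Worked example: first_term=1, common_diff=1, num_of_terms=10 gives
# (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0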
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_encoder_blocks''' ) )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[16, 32, 64, 128] , __SCREAMING_SNAKE_CASE=[1, 4, 8, 16] , __SCREAMING_SNAKE_CASE=[1, 2, 4, 8] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , ) -> List[str]:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_encoder_blocks
__snake_case = sr_ratios
__snake_case = depths
__snake_case = hidden_sizes
__snake_case = downsampling_rates
__snake_case = num_attention_heads
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = SegformerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(__SCREAMING_SNAKE_CASE )
__snake_case = __snake_case = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = self.num_labels
__snake_case = SegformerForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__snake_case = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = 1
__snake_case = SegformerForSemanticSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : List[str] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__lowercase : Union[str, Any] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Tuple = True
__lowercase : Union[str, Any] = False
__lowercase : Any = False
__lowercase : int = False
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = SegformerModelTester(self )
__snake_case = SegformerConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__snake_case = outputs.attentions
__snake_case = sum(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__snake_case = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
__snake_case = (self.model_tester.image_size // 4) ** 2
__snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__snake_case = (self.model_tester.image_size // 32) ** 2
__snake_case = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__snake_case = len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__snake_case = True
__snake_case = True
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(__SCREAMING_SNAKE_CASE ) )
__snake_case = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
__snake_case = (self.model_tester.image_size // 4) ** 2
__snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__snake_case = outputs.hidden_states
__snake_case = self.model_tester.num_encoder_blocks
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if not self.model_tester.is_training:
return
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__snake_case = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
__snake_case = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = SegformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ()-> Optional[Any]:
'''simple docstring'''
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE )
__snake_case = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__SCREAMING_SNAKE_CASE )
__snake_case = prepare_img()
__snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
__snake_case = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case = model(__SCREAMING_SNAKE_CASE )
__snake_case = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__snake_case = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE )
__snake_case = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__SCREAMING_SNAKE_CASE )
__snake_case = prepare_img()
__snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
__snake_case = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case = model(__SCREAMING_SNAKE_CASE )
__snake_case = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__snake_case = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-1 ) )
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE )
__snake_case = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__SCREAMING_SNAKE_CASE )
__snake_case = prepare_img()
__snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
__snake_case = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case = model(__SCREAMING_SNAKE_CASE )
__snake_case = outputs.logits.detach().cpu()
__snake_case = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] )
__snake_case = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
__snake_case = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
__snake_case = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
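# Example: with RUN_SLOW=yes in the environment, parse_flag_from_env("RUN_SLOW",
# default=False) returns 1 (truthy); strtobool accepts y/yes/t/true/on/1 and
# their negative counterparts, and any other value triggers the ValueError above.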
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.1'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
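# This wrapper turns transient Hub outages into expected failures: any HTTPError
# whose message starts with 500 or 502 marks the test xfail instead of failing
# the whole suite, while other errors are re-raised unchanged.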
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
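# Example: under pytest-xdist worker "gw3" this yields 29500 + 3 = 29503,
# giving each worker its own master port so concurrent torch.distributed test
# runs do not collide.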
| 24 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ : Optional[Any] = '''src/diffusers'''
UpperCAmelCase_ : Tuple = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase_ : List[Any] = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase_ : Any = spec.loader.load_module()
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : int )-> Optional[Any]:
'''simple docstring'''
return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , _lowerCamelCase ) is not None
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[str]:
'''simple docstring'''
__snake_case = object_name.split('''.''' )
__snake_case = 0
# First let's find the module where our object lives.
__snake_case = parts[i]
while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase , f'''{module}.py''' ) ):
i += 1
if i < len(_lowerCamelCase ):
__snake_case = os.path.join(_lowerCamelCase , parts[i] )
if i >= len(_lowerCamelCase ):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(_lowerCamelCase , f'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__snake_case = f.readlines()
# Now let's find the class / func in the code!
__snake_case = ''''''
__snake_case = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase ):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__snake_case = line_index
while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] , _lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__snake_case = lines[start_index:line_index]
return "".join(_lowerCamelCase )
UpperCAmelCase_ : Optional[int] = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
UpperCAmelCase_ : Dict = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
UpperCAmelCase_ : Any = re.compile(R'''<FILL\s+[^>]*>''')
def _UpperCamelCase (_lowerCamelCase : str )-> Optional[Any]:
'''simple docstring'''
__snake_case = code.split('''\n''' )
__snake_case = 0
while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_lowerCamelCase ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
__snake_case = len(get_indent(_lowerCamelCase ) ) > 0
if has_indent:
__snake_case = f'''class Bla:\n{code}'''
    __snake_case = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 , preview=_lowerCamelCase )
__snake_case = black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
__snake_case , __snake_case = style_docstrings_in_code(_lowerCamelCase )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Dict=False )-> Tuple:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__snake_case = f.readlines()
__snake_case = []
__snake_case = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_lowerCamelCase ):
__snake_case = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__snake_case , __snake_case , __snake_case = search.groups()
__snake_case = find_code_in_diffusers(_lowerCamelCase )
__snake_case = get_indent(_lowerCamelCase )
__snake_case = line_index + 1 if indent == theoretical_indent else line_index + 2
__snake_case = theoretical_indent
__snake_case = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
__snake_case = True
while line_index < len(_lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(_lowerCamelCase ):
break
__snake_case = lines[line_index]
__snake_case = _should_continue(_lowerCamelCase , _lowerCamelCase ) and re.search(f'''^{indent}# End copy''' , _lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__snake_case = lines[start_index:line_index]
__snake_case = ''''''.join(_lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
__snake_case = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_lowerCamelCase ) is None]
__snake_case = '''\n'''.join(_lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(_lowerCamelCase ) > 0:
__snake_case = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
__snake_case = [_re_replace_pattern.search(_lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__snake_case , __snake_case , __snake_case = pattern.groups()
__snake_case = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if option.strip() == "all-casing":
__snake_case = re.sub(obja.lower() , obja.lower() , _lowerCamelCase )
__snake_case = re.sub(obja.upper() , obja.upper() , _lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__snake_case = blackify(lines[start_index - 1] + theoretical_code )
__snake_case = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__snake_case = lines[:start_index] + [theoretical_code] + lines[line_index:]
__snake_case = start_index + 1
if overwrite and len(_lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''' )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_lowerCamelCase )
return diffs
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = glob.glob(os.path.join(_lowerCamelCase , '''**/*.py''' ) , recursive=_lowerCamelCase )
__snake_case = []
for filename in all_files:
__snake_case = is_copy_consistent(_lowerCamelCase , _lowerCamelCase )
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(_lowerCamelCase ) > 0:
__snake_case = '''\n'''.join(_lowerCamelCase )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase_ : str = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
    '''Return the number of integer partitions of m via memoised dynamic programming.'''
__snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__snake_case = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
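# Sanity check for the DP above: partition(5) == 7, matching the seven
# partitions of 5, namely 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.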
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : Any = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
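# The TYPE_CHECKING/_LazyModule split above defers the heavy torch/tensorflow
# imports until a RAG attribute is first accessed, so importing the package
# stays cheap even when the optional backends are installed.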
| 24 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*_lowerCamelCase : str , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=2 )-> str:
'''simple docstring'''
from .. import __version__
__snake_case = take_from
__snake_case = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__snake_case = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case = call_frame.filename
__snake_case = call_frame.lineno
__snake_case = call_frame.function
__snake_case , __snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
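# Hedged call sketch for the helper above (names are illustrative): passing a
# single ("scale", "1.0.0", "use cross_attention_kwargs instead") tuple
# together with take_from=kwargs pops the deprecated keyword from kwargs,
# emits a warning built from the message, and returns the popped value.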
| 24 | 1 |
'''simple docstring'''
import os
def _UpperCamelCase (_lowerCamelCase : str = "matrix.txt" )-> int:
    '''Return the minimal path sum through the grid in matrix.txt, moving only right and down.'''
with open(os.path.join(os.path.dirname(_lowerCamelCase ) , _lowerCamelCase ) ) as in_file:
__snake_case = in_file.read()
__snake_case = [[int(_lowerCamelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
__snake_case = [[0 for cell in row] for row in grid]
__snake_case = len(grid[0] )
__snake_case = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
__snake_case = grid[0][0]
for i in range(1 , _lowerCamelCase ):
__snake_case = grid[0][i] + dp[0][i - 1]
for i in range(1 , _lowerCamelCase ):
__snake_case = grid[i][0] + dp[i - 1][0]
for i in range(1 , _lowerCamelCase ):
for j in range(1 , _lowerCamelCase ):
__snake_case = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
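# Worked example of the right/down-only DP above on a 2x2 grid [[1, 3], [2, 4]]:
# dp becomes [[1, 4], [3, 7]], so the minimal path sum is 7 (1 -> 2 -> 4 beats
# 1 -> 3 -> 4, which costs 8).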
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    __snake_case = config.depths[-1] - config.num_meta3d_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
'''simple docstring'''
import sys
UpperCAmelCase_ : int = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _UpperCamelCase (_lowerCamelCase : str )-> int:
    '''Return the product of the digits of s.'''
__snake_case = 1
for digit in s:
product *= int(_lowerCamelCase )
return product
def _UpperCamelCase (_lowerCamelCase : str = N )-> int:
    '''Return the greatest product of thirteen adjacent digits in n (greedy sliding-window scan).'''
__snake_case = -sys.maxsize - 1
__snake_case = n[:13]
__snake_case = 13
while cur_index < len(_lowerCamelCase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__snake_case = substr[1:] + n[cur_index]
cur_index += 1
else:
__snake_case = max(_lowerCamelCase , str_eval(_lowerCamelCase ) )
__snake_case = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
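# Example of the digit-product helper above: str_eval("9989") == 9 * 9 * 8 * 9
# == 5832; solution() slides a 13-digit window over N and keeps the window with
# the greatest such product (the Project Euler problem 8 setup).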
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
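# Holds the image-processor kwargs shared by the MobileViT image-processing tests below.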
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
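# Kaldi-style filter-bank feature extractor with optional utterance-level cepstral mean/variance normalization (CMVN).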
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : str = ['''input_features''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=1_6000 , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = num_mel_bins
__snake_case = do_ceptral_normalize
__snake_case = normalize_means
__snake_case = normalize_vars
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''Compute Kaldi-compatible mel filter-bank features from a mono waveform (rescaled to the 16-bit integer range).'''
__snake_case = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__snake_case = torch.from_numpy(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
__snake_case = ta_kaldi.fbank(__SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray:
'''Apply utterance-level mean/variance normalization to x over its first input_length frames.'''
if normalize_means:
__snake_case = x[:input_length].mean(axis=0 )
__snake_case = np.subtract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if normalize_vars:
__snake_case = x[:input_length].std(axis=0 )
__snake_case = np.divide(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
__snake_case = padding_value
# make sure array is in float32
__snake_case = x.astype(np.floataa )
return x
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]:
'''Apply utterance CMVN to each feature matrix, using the attention mask (when given) to find unpadded lengths.'''
__snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__snake_case = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__snake_case = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__snake_case = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case = [raw_speech]
# extract fbank features
__snake_case = [self._extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
__snake_case = BatchFeature({'''input_features''': features} )
__snake_case = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# make sure list is in array format
__snake_case = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
__snake_case = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__snake_case = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__snake_case = (
np.array(__SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__snake_case = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__SCREAMING_SNAKE_CASE )
if return_tensors is not None:
__snake_case = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
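# Reader that turns a pyspark DataFrame into a datasets Dataset, either streamed or materialized via the Spark builder.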
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''Build the dataset from the Spark DataFrame: stream it, or download/prepare and materialize it.'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
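# Bellman-Ford single-source shortest paths: relax every edge vertex_count - 1
# times, then do one extra pass to detect negative-weight cycles.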
def _UpperCamelCase (_lowerCamelCase : list[float] , _lowerCamelCase : Dict )-> List[Any]:
'''Print the shortest distance from the source vertex src to every vertex.'''
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(_lowerCamelCase ):
print(f'''{i}\t\t{d}''' )
def _UpperCamelCase (_lowerCamelCase : list[dict[str, int]] , _lowerCamelCase : list[float] , _lowerCamelCase : int )-> Any:
'''Return True if any edge can still be relaxed, i.e. the graph contains a negative-weight cycle.'''
for j in range(_lowerCamelCase ):
__snake_case , __snake_case , __snake_case = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _UpperCamelCase (_lowerCamelCase : list[dict[str, int]] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> list[float]:
'''Run Bellman-Ford from vertex src; return the distance array, or raise if a negative cycle is found.'''
__snake_case = [float('''inf''' )] * vertex_count
__snake_case = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_lowerCamelCase ):
__snake_case , __snake_case , __snake_case = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__snake_case = distance[u] + w
__snake_case = check_negative_cycle(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Dict = int(input('''Enter number of vertices: ''').strip())
UpperCAmelCase_ : List[str] = int(input('''Enter number of edges: ''').strip())
UpperCAmelCase_ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
UpperCAmelCase_ : Optional[Any] = {'''src''': src, '''dst''': dest, '''weight''': weight}
UpperCAmelCase_ : Optional[Any] = int(input('''\nEnter shortest path source:''').strip())
UpperCAmelCase_ : int = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
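# Fast (tokenizers-backed) LED tokenizer; it reuses BART's byte-level BPE vocabulary and adds LED-specific padding of the global attention mask.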
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''Pad as usual, then extend `global_attention_mask` with -1 so it matches the padded input length.'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
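# Shape and numerical-regression tests for the VQModel (VQ-VAE) autoencoder.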
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : List[Any] = VQModel
__lowercase : str = '''sample'''
@property
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=(32, 32) ) -> Dict:
'''simple docstring'''
__snake_case = 4
__snake_case = 3
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
return {"sample": image}
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__SCREAMING_SNAKE_CASE )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(__SCREAMING_SNAKE_CASE ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
__snake_case = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
__snake_case = image.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case = model(__SCREAMING_SNAKE_CASE ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__snake_case = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
| 24 |
'''simple docstring'''
from collections import deque
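# Tarjan's strongly-connected-components algorithm: one DFS assigning discovery
# indices and low-link values, popping a component whenever lowlink == index.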
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''Return the strongly connected components of the adjacency-list graph g as lists of vertices.'''
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''Build an adjacency list with n vertices from the (u, v) edge list.'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 | 1 |
'''simple docstring'''
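# Greedy activity selection: with activities sorted by finish time, repeatedly
# pick the next activity whose start is not before the last chosen finish.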
def _UpperCamelCase (_lowerCamelCase : list[int] , _lowerCamelCase : list[int] )-> None:
'''Print a maximum-size set of mutually compatible activities (by index); assumes finish[] is sorted ascending.'''
__snake_case = len(_lowerCamelCase )
print('''The following activities are selected:''' )
# The first activity is always selected
__snake_case = 0
print(_lowerCamelCase , end=''',''' )
# Consider rest of the activities
for j in range(_lowerCamelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_lowerCamelCase , end=''',''' )
__snake_case = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : int = [1, 3, 0, 5, 8, 5]
UpperCAmelCase_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
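# Slow integration tests for the SentencePiece-based Barthez tokenizer (downloads the real moussaKam/mbarthez vocab).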
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
# fmt: off
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] )-> Dict:
'''Load a TensorFlow BERT checkpoint into BertForPreTraining and save the weights as a PyTorch state dict.'''
__snake_case = BertConfig.from_json_file(_lowerCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
__snake_case = BertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
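# Builds RoFormer configs and random inputs for the Flax model tests below.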
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
from math import factorial
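# Project Euler problem 20: sum of the digits of 100!.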
def _UpperCamelCase (_lowerCamelCase : int = 1_00 )-> int:
'''Return the sum of the digits of n! (Project Euler problem 20).'''
return sum(map(int , str(factorial(_lowerCamelCase ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
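# Converts original LAVIS BLIP-2 checkpoints to the Hugging Face format and checks that logits and generations match.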
def _UpperCamelCase ()-> int:
'''Download the demo image used to compare original and converted model outputs.'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''Collect (original_name, hf_name) pairs for renaming LAVIS checkpoint keys to the HF layout.'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''Pop the old key from dct and re-insert its value under the new key.'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''Fuse the original separate q and v biases (with a zero k bias) into the combined qkv bias used by the HF model.'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''Build the BlipaConfig for model_name; COCO checkpoints use image size 364, all others 224.'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''Convert the named BLIP-2 checkpoint, verify logits and generation against the original, then optionally save/push.'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
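# Fast CPU tests plus slow GPU integration tests for Self-Attention Guidance (SAG) in Stable Diffusion.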
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : List[str] = StableDiffusionSAGPipeline
__lowercase : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
__lowercase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowercase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowercase : Optional[int] = False
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__snake_case = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> int:
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__snake_case = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = '''.'''
__snake_case = torch.manual_seed(0 )
__snake_case = sag_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = '''.'''
__snake_case = torch.manual_seed(0 )
__snake_case = sag_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = '''.'''
__snake_case = torch.manual_seed(0 )
__snake_case = sag_pipe(
[prompt] , width=768 , height=512 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
__snake_case = output.images
assert image.shape == (1, 512, 768, 3)
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image (image : Union[List, PIL.Image.Image, torch.Tensor] )-> torch.Tensor:
    '''simple docstring'''
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask (mask : Union[List, PIL.Image.Image, torch.Tensor] )-> torch.Tensor:
    '''simple docstring'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class lowerCAmelCase ( __lowerCAmelCase):
    __lowercase : UNet2DModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
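# --- Hedged usage sketch (illustrative; the checkpoint name and input images
# below are assumptions, not taken from this file). Mask semantics: pixel
# value 1 keeps the original content, 0 marks the region to repaint.
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#                 eta=0.0, jump_length=10, jump_n_sample=10)
#   output.images[0].save("repainted.png")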
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation (cells : list[list[int]] )-> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images (cells : list[list[int]] , frames : int )-> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 2_55 - cells[y][x] * 2_55
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
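# --- Illustrative check (added; not part of the original file): the blinker
# oscillates with period 2, so applying new_generation twice reproduces the
# starting pattern.
if __name__ == "__main__":
    assert new_generation(new_generation(BLINKER ) ) == BLINKER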
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
        __snake_case = [hypothesis_template.format(x ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
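# --- Hedged usage sketch (illustrative; the model id is an assumption -- any
# CLIP-style zero-shot image-classification checkpoint should work):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> list of {"score": ..., "label": ...} dicts sorted by descending score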
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
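# --- Added note (illustrative; not part of the original module): _LazyModule
# defers the heavy torch-backed imports until an attribute is first accessed:
#
#   from transformers.models import clipseg   # cheap, nothing loaded yet
#   model_cls = clipseg.CLIPSegModel           # first access triggers the real import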
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good the predictions are, given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program before it is considered failed (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase ( datasets.Metric):
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def lowerCAmelCase ( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ) -> List[str]:
        '''simple docstring'''
        if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('''This metric is currently not supported on Windows.''' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '''\n''' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result['''task_id''']].append((result['''completion_id'''], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['''passed'''] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {F'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k (num_samples , num_correct , k )-> np.ndarray:
    '''Estimates pass@k of each problem and returns the estimates in an array.'''
    def estimator(n : int , c : int , k : int ) -> float:
        # Computes 1 - comb(n - c, k) / comb(n, k) without large binomials.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
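# --- Numeric sketch (added for illustration): with n=2 samples and c=1 correct,
# the unbiased estimator gives pass@1 = 1 - (1 - 1/2) = 0.5, matching the
# docstring example above.
if __name__ == "__main__":
    assert float(estimate_pass_at_k(np.array([2] ) , np.array([1] ) , 1 )[0] ) == 0.5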
| 24 |
'''simple docstring'''
def sum_of_digits (n : int )-> int:
    '''simple docstring'''
    n = abs(n )
    res = 0
while n > 0:
res += n % 10
n //= 10
return res
def sum_of_digits_recursion (n : int )-> int:
    '''simple docstring'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact (n : int )-> int:
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
        print(f'''{call:56} = {func(value )} -- {timing:.4f} seconds''' )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
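# --- Added sanity sketch (not in the original file): all three variants must
# agree, including on negative inputs, since each takes the absolute value first.
if __name__ == "__main__":
    for n in (0, 7, -7, 12_345):
        assert sum_of_digits(n ) == sum_of_digits_recursion(n ) == sum_of_digits_compact(n )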
| 24 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _UpperCamelCase (_lowerCamelCase : Namespace )-> List[str]:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCAmelCase_ : str = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class lowerCAmelCase ( __lowerCAmelCase):
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = parser.add_parser(
            '''convert''' , help='''CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=__SCREAMING_SNAKE_CASE , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , ) -> List[str]:
'''simple docstring'''
__snake_case = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F'''Loading model {model_type}''' )
__snake_case = model_type
__snake_case = tf_checkpoint
__snake_case = pytorch_dump_output
__snake_case = config
__snake_case = finetuning_task_name
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
if "ckpt" in self._tf_checkpoint.lower():
__snake_case = self._tf_checkpoint
__snake_case = ''''''
else:
__snake_case = self._tf_checkpoint
__snake_case = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
__SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , __SCREAMING_SNAKE_CASE )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__SCREAMING_SNAKE_CASE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
| 24 |
'''simple docstring'''
def infix_2_postfix (infix )-> str:
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                # "(" is skipped here so the priority lookup never sees it
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , ) # Output in tabular format
    return ''''''.join(post_fix ) # return Postfix as str
def infix_2_prefix (infix )-> str:
    '''simple docstring'''
    infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')''' # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''(''' # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
    Infix = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
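# --- Worked example (added for illustration): besides printing the conversion
# tables, the functions return the converted strings:
#   infix_2_postfix("a+b*c")  ->  "abc*+"
#   infix_2_prefix("a+b*c")   ->  "+a*bc"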
| 24 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        '''simple docstring'''
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        '''simple docstring'''
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels : int
    out_channels : int = None
    dropout_prob : float = 0.0
    use_nin_shortcut : bool = None
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        '''simple docstring'''
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
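# --- Shape note (illustrative; not part of the original module): FlaxUpsample2D
# nearest-neighbour resizes NHWC activations 2x before its 3x3 conv, e.g.
# (1, 8, 8, 64) -> resize -> (1, 16, 16, 64) -> conv -> (1, 16, 16, out_channels),
# while FlaxDownsample2D halves the spatial size with a strided conv.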
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
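# --- Hedged usage sketch (illustrative; `SwinModel` is the matching
# Transformers model class, and the values shown are the defaults above):
#
#   from transformers import SwinConfig, SwinModel
#
#   config = SwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#   model = SwinModel(config)
#   # config.hidden_size == int(96 * 2 ** (len(depths) - 1)) == 768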
| 24 | 1 |
'''simple docstring'''
def combination_util (arr , n , r , index , data , i )-> None:
    '''simple docstring'''
    if index == r:
        for j in range(r ):
            print(data[j] , end=''' ''' )
        print(''' ''' )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination (arr , n , r )-> None:
    '''simple docstring'''
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
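# --- Expected output note (added for illustration): choosing r = 3 of the 5
# elements prints C(5, 3) = 10 combinations, from "10 20 30" up to "30 40 50",
# because each call first includes arr[i] and then excludes it.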
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time (t )-> str:
    '''simple docstring'''
    t = int(t )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar (value , total , prefix , label , width=3_00 )-> str:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table (items )-> str:
    '''simple docstring'''
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
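# --- Illustrative check (not part of the original module): format_time renders
# seconds as H:MM:SS and drops the hour when it is zero:
#   format_time(75)    ->  "01:15"
#   format_time(3_725) ->  "1:02:05"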
| 24 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch (tf_checkpoint_path , config_file , pytorch_dump_path , base_model )-> None:
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
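# --- Hedged CLI sketch (illustrative; the paths below are placeholders for a
# real TensorFlow Funnel checkpoint):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel_tf/model.ckpt \
#       --config_file ./funnel_tf/config.json \
#       --pytorch_dump_path ./funnel_pt/pytorch_model.bin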
| 24 |
'''simple docstring'''
def least_divisible_repunit (divisor : int )-> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution (limit : int = 1_00_00_00 )-> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
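# --- Worked example (added for illustration): R(6) = 111_111 = 7 * 15_873 is
# the first repunit divisible by 7, so the helper returns 6.
if __name__ == "__main__":
    assert least_divisible_repunit(7 ) == 6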
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order (df , partition_order ):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed ():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples ():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order ) # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable ():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : Dict = IFPipeline
__lowercase : Optional[int] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
self._test_save_load_local()
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
__snake_case = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
__snake_case , __snake_case = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__snake_case = None
__snake_case = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__snake_case = IFImgaImgPipeline(**pipe_a.components )
__snake_case = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__snake_case = IFInpaintingPipeline(**pipe_a.components )
__snake_case = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (256, 256, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
_start_torch_memory_measurement()
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , original_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (256, 256, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
_start_torch_memory_measurement()
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , original_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (256, 256, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCamelCase ()-> List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # closed form for the sum of an arithmetic series: S_n = n / 2 * (2 * a_1 + (n - 1) * d)
return total
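# Cross-check sketch (an assumed example, not in the original file): the closed
# form above should agree with an explicit term-by-term sum; for
# (first_term=1, common_diff=1, num_of_terms=10) both yield 55.0.
def _brute_force_series_sum(first_term: int, common_diff: int, num_of_terms: int) -> float:
    return float(sum(first_term + i * common_diff for i in range(num_of_terms)))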
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> Any:
'''simple docstring'''
return getitem, k
def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : int )-> Union[str, Any]:
'''simple docstring'''
return setitem, k, v
def _UpperCamelCase (_lowerCamelCase : str )-> Any:
'''simple docstring'''
return delitem, k
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : str , *_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
try:
return fun(_lowerCamelCase , *_lowerCamelCase ), None
except Exception as e:
return None, e
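# Usage sketch (assumed, not part of the original tests): each fixture below is
# a (callable, *args) tuple that gets replayed against both a plain dict and a
# HashMap, e.g.
#     _run_operation({}, *_set('''k''', '''v'''))  # -> (None, None)
#     _run_operation({}, *_get('''missing'''))     # -> (None, KeyError('''missing'''))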
UpperCAmelCase_ : str = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
UpperCAmelCase_ : Tuple = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
UpperCAmelCase_ : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
UpperCAmelCase_ : Optional[Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
UpperCAmelCase_ : Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCAmelCase_ : Union[str, Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def _UpperCamelCase (_lowerCamelCase : Any )-> Union[str, Any]:
'''simple docstring'''
__snake_case = HashMap(initial_block_size=4 )
__snake_case = {}
for _, (fun, *args) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = _run_operation(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase )
__snake_case , __snake_case = _run_operation(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase )
assert my_res == py_res
assert str(_lowerCamelCase ) == str(_lowerCamelCase )
assert set(_lowerCamelCase ) == set(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
assert set(my.items() ) == set(py.items() )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
    def is_public(_lowerCamelCase : str ) -> bool:
        return not _lowerCamelCase.startswith('''_''' )
__snake_case = {name for name in dir({} ) if is_public(_lowerCamelCase )}
__snake_case = {name for name in dir(HashMap() ) if is_public(_lowerCamelCase )}
assert dict_public_names > hash_public_names
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.0: \'pip install \"soundfile>=0.12.0\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
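# Usage sketch (hypothetical name and markers): the decorator factory above
# applies every given marker to each test method of a class at once.
#
# @for_all_test_methods(require_faiss, slow)
# class IndexTests(TestCase):
#     def test_add_vectors(self):
#         ...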
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired by https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
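# Usage sketch (assuming the public name ''offline'' for the context manager
# above): simulate a network outage around a block of code.
#
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     with pytest.raises(requests.ConnectionError):
#         requests.Session().request('''GET''', '''https://huggingface.co''')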
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until the process is done, and if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually ran and produced some output, in case the test relies on
    # the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
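# Usage sketch (hypothetical command; assuming the public name
# ''execute_subprocess_async'' for the helper above): stdout/stderr come back
# as lists of decoded lines.
#
# result = execute_subprocess_async(['''python''', '''-c''', '''print("ok")'''], env=os.environ.copy())
# assert result.returncode == 0 and '''ok''' in result.stdout[0]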
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
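# Worked example (derived from the two helpers above, assuming their public
# names): under pytest-xdist worker ''gw2'', pytest_xdist_worker_id() returns 2,
# so the unique torch.distributed port is 2_95_00 + 2 == 29502 -- one port per
# xdist worker.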
| 24 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__snake_case = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__snake_case = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
# load decoder from hub
__snake_case = '''hf-internal-testing/ngram-beam-search-decoder'''
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(__SCREAMING_SNAKE_CASE )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
__snake_case = processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
__snake_case = '''This is a test string'''
__snake_case = processor(text=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=(2, 10, 16) , __SCREAMING_SNAKE_CASE=77 ) -> Tuple:
'''simple docstring'''
np.random.seed(__SCREAMING_SNAKE_CASE )
return np.random.rand(*__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(__SCREAMING_SNAKE_CASE )
__snake_case = decoder.decode_beams(__SCREAMING_SNAKE_CASE )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(__SCREAMING_SNAKE_CASE )
else:
with get_context(__SCREAMING_SNAKE_CASE ).Pool() as pool:
__snake_case = processor.batch_decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = list(__SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.logit_score )
self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.lm_score )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -20.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
__SCREAMING_SNAKE_CASE , beam_width=__SCREAMING_SNAKE_CASE , beam_prune_logp=__SCREAMING_SNAKE_CASE , token_min_logp=__SCREAMING_SNAKE_CASE , )
__snake_case = decoded_processor_out.text
__snake_case = list(__SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , beam_width=__SCREAMING_SNAKE_CASE , beam_prune_logp=__SCREAMING_SNAKE_CASE , token_min_logp=__SCREAMING_SNAKE_CASE , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __SCREAMING_SNAKE_CASE )
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -20.0
__snake_case = True
__snake_case = processor.batch_decode(
__SCREAMING_SNAKE_CASE , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , unk_score_offset=__SCREAMING_SNAKE_CASE , lm_score_boundary=__SCREAMING_SNAKE_CASE , )
__snake_case = decoded_processor_out.text
__snake_case = list(__SCREAMING_SNAKE_CASE )
decoder.reset_params(
alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , unk_score_offset=__SCREAMING_SNAKE_CASE , lm_score_boundary=__SCREAMING_SNAKE_CASE , )
with get_context('''fork''' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __SCREAMING_SNAKE_CASE )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__snake_case = os.listdir(__SCREAMING_SNAKE_CASE )
__snake_case = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__snake_case = os.listdir(__SCREAMING_SNAKE_CASE )
__snake_case = os.listdir(__SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__snake_case = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
__snake_case = processor_auto(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(__SCREAMING_SNAKE_CASE )
__snake_case = processor_auto.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(__SCREAMING_SNAKE_CASE , output_word_offsets=__SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(__SCREAMING_SNAKE_CASE , output_word_offsets=__SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
__snake_case = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__SCREAMING_SNAKE_CASE )
__snake_case = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(__SCREAMING_SNAKE_CASE )
__snake_case = next(__SCREAMING_SNAKE_CASE )
__snake_case = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__snake_case = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
__snake_case = model(__SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=__SCREAMING_SNAKE_CASE )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__snake_case = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''start_time''' ) )
__snake_case = torch.tensor(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''end_time''' ) )
# fmt: off
__snake_case = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
__snake_case = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=0.01 ) )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=0.01 ) )
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
    __snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase + 1 )]
    for i in range(_lowerCamelCase + 1 ):
        memo[i][0] = 1
    for n in range(_lowerCamelCase + 1 ):
        for k in range(1 , _lowerCamelCase ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[_lowerCamelCase][_lowerCamelCase - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase_ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def _UpperCamelCase (_lowerCamelCase : str )-> List[Any]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(_lowerCamelCase , _lowerCamelCase )
return k
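# Worked example (illustrative): applying the PATTERNS table above in order,
# '''memory_attention/output_proj/kernel''' -> '''encoder_attn.out_proj.weight'''.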
def _UpperCamelCase (_lowerCamelCase : dict , _lowerCamelCase : dict )-> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(_lowerCamelCase )
__snake_case = PegasusConfig(**_lowerCamelCase )
__snake_case = PegasusForConditionalGeneration(_lowerCamelCase )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(_lowerCamelCase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(_lowerCamelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__snake_case = mapping['''shared.weight''']
__snake_case = mapping['''shared.weight''']
__snake_case = {k: torch.zeros_like(_lowerCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_lowerCamelCase )
__snake_case , __snake_case = torch_model.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
__snake_case = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase (_lowerCamelCase : int="./ckpt/aeslc/model.ckpt-32000" )-> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(_lowerCamelCase )
__snake_case = {}
__snake_case = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_lowerCamelCase , desc='''converting tf checkpoint to dict''' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
__snake_case = array
return tf_weights
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : str )-> List[Any]:
'''simple docstring'''
__snake_case = Path(_lowerCamelCase ).parent.name
__snake_case = task_specific_params[f'''summarization_{dataset}''']['''max_position_embeddings''']
__snake_case = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=_lowerCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowerCamelCase )
# convert model
__snake_case = get_tf_weights_as_numpy(_lowerCamelCase )
__snake_case = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(_lowerCamelCase , _lowerCamelCase )
torch_model.save_pretrained(_lowerCamelCase )
__snake_case = torch_model.state_dict()
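    # Pegasus uses static sinusoidal position embeddings that are rebuilt at load
    # time, so they are dropped from the saved state dict (explanatory note).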
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_lowerCamelCase , Path(_lowerCamelCase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCAmelCase_ : str = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase_ : Optional[int] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase_ : Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 24 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping(fname: str , overwrite: bool = False ):
    '''simple docstring'''
    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x: _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False ):
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'''
            ''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
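# --- Editor's sketch (not part of the original script) ---
# A minimal, self-contained illustration of the two regexes above on an
# invented mapping fragment:
def _demo_sort_regexes():
    intro = '''MODEL_MAPPING_NAMES = OrderedDict('''
    entry = '''        ("bert", "BertModel"),'''
    assert _re_intro_mapping.search(intro ) is not None
    # the captured group is the quoted identifier each block is sorted by
    assert _re_identifier.search(entry ).groups()[0] == "bert"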
| 24 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'''do_clean_text''': False, '''add_prefix_space''': False}
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
        vocab_tokens = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
        # fmt: on
        emoji_tokens = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}}  # 😀
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.emoji_file , '''w''' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
        output_text = '''こんにちは、世界。 \nこんばんは、世界。😀'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        '''simple docstring'''
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、世界。 こんばんは、㔺界。'''
        expected_token = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
    def test_tokenizer_bagoftoken( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
        expected_text = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
        tokens = tokenizer.encode(input_text )
        output = tokenizer.decode(tokens )
        self.assertEqual(output , expected_text )
@slow
    def test_prefix_input( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        expected_text = '''こんにちは、世界。こんばんは、世界。😀'''
        x_token_1 = tokenizer.encode(prefix_text + input_text )
        x_token_2 = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
        x_token_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        x_decoded_1 = tokenizer.decode(x_token_1 )
        x_decoded_2 = tokenizer.decode(x_token_2 )
        x_decoded_3 = tokenizer.decode(x_token_3 )
        self.assertEqual(x_decoded_1 , expected_text )
        self.assertEqual(x_decoded_2 , expected_text )
        self.assertEqual(x_decoded_3 , expected_text )
@slow
    def test_token_type_ids( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_id_2 = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
        type_id_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_id_1 , expected_mask_1 )
        self.assertListEqual(type_id_2 , expected_mask_2 )
        self.assertListEqual(type_id_3 , expected_mask_3 )
@slow
    def test_prefix_tokens( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        x_token_1 = tokenizer.encode('''あンいワ''' )
        x_token_2 = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
        x_token_3 = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_3[-1] )  # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3] )  # SEG token
@slow
    def test_batch_encode( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        text = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
        x_token = tokenizer(text , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(text , padding=True )
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_types = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_masks = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_types )
        self.assertListEqual(x_token.attention_mask , expected_masks )
        self.assertListEqual(x_token_2.input_ids , expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids , expected_types )
        self.assertListEqual(x_token_2.attention_mask , expected_masks )
    def test_conversion_reversible( self ):
        '''simple docstring'''
        pass
    def test_padding_different_model_input_name( self ):
        '''simple docstring'''
        pass
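# --- Editor's sketch (not part of the original tests) ---
# The prefix mechanism exercised above, as a standalone snippet (requires the
# "Tanrei/GPTSAN-japanese" checkpoint, so it is left commented):
#
#   tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#   enc = tokenizer("こんばんは", prefix_text="こんにちは")
#   print(enc.token_type_ids)  # 1 for prefix positions, 0 for the completion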
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ):
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
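# --- Editor's sketch (not part of the original module): typical call pattern ---
# Assuming a caller that still accepts a retired keyword argument; the kwarg
# name and removal version below are hypothetical.
#
#   def set_timesteps(self, num_inference_steps, **kwargs):
#       steps = deprecate("steps", "0.30.0", "Use `num_inference_steps` instead.", take_from=kwargs)
#       if steps is not None:
#           num_inference_steps = steps
#
# `deprecate` pops the old kwarg from `kwargs` (emitting a FutureWarning with
# the message), returns its value, and raises once the installed version
# reaches the removal version.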
| 24 | 1 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester( ConfigTester):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config , '''num_heads''' ) )
class CvtModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    @unittest.skip(reason='''Cvt does not output attentions''' )
    def test_attention_outputs( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest( unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
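# --- Editor's sketch (not part of the original tests) ---
# The spatial sizes asserted in create_and_check_model follow the standard conv
# output formula, floor((d + 2 * padding - kernel) / stride) + 1, per stage.
# With the tester defaults (image_size=64, kernels [7, 3, 3], strides [4, 2, 2],
# paddings [2, 1, 1]) the feature map shrinks 64 -> 16 -> 8 -> 4:
def _cvt_stage_sizes(d=64 , kernels=(7, 3, 3) , strides=(4, 2, 2) , paddings=(2, 1, 1) ):
    sizes = []
    for k, s, p in zip(kernels , strides , paddings ):
        d = floor((d + 2 * p - k) / s + 1 )
        sizes.append(d )
    return sizes  # [16, 8, 4] for the defaults above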
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name , num_meta4D_last_stage ):
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , param = old_name.split('''.''' )
        if layer == "0":
            new_name = old_name.replace('''0''' , '''convolution1''' )
        elif layer == "1":
            new_name = old_name.replace('''1''' , '''batchnorm_before''' )
        elif layer == "3":
            new_name = old_name.replace('''3''' , '''convolution2''' )
        else:
            new_name = old_name.replace('''4''' , '''batchnorm_after''' )
    if "network" in old_name and re.search(R'''\d\.\d''' , old_name ):
        two_digit_num = R'''\b\d{2}\b'''
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R'''\d\.\d\d.''' , old_name ).group()
        else:
            match = re.search(R'''\d\.\d.''' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '''''' )
            trimmed_name = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
            new_name = '''intermediate_stages.''' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '''''' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('''norm1''' , '''layernorm1''' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('''norm2''' , '''layernorm2''' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('''fc1''' , '''linear_in''' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('''fc2''' , '''linear_out''' )
            new_name = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(R'''.\d.''' , old_name ):
        new_name = old_name.replace('''network''' , '''intermediate_stages''' )
    if "fc" in new_name:
        new_name = new_name.replace('''fc''' , '''convolution''' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('''norm1''' , '''batchnorm_before''' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('''norm2''' , '''batchnorm_after''' )
    if "proj" in new_name:
        new_name = new_name.replace('''proj''' , '''projection''' )
    if "dist_head" in new_name:
        new_name = new_name.replace('''dist_head''' , '''distillation_classifier''' )
    elif "head" in new_name:
        new_name = new_name.replace('''head''' , '''classifier''' )
    elif "patch_embed" in new_name:
        new_name = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('''norm''' , '''layernorm''' )
        new_name = '''efficientformer.''' + new_name
    else:
        new_name = '''efficientformer.encoder.''' + new_name
    return new_name
def convert_torch_checkpoint(checkpoint , num_meta4D_last_stage ):
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(checkpoint_path: Path , efficientformer_config_file: Path , pytorch_dump_path: Path , push_to_hub: bool ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(pytorch_dump_path )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
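# --- Editor's sketch (not part of the original script): example invocation ---
# Paths below are hypothetical placeholders.
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub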
| 24 | 1 |
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
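# --- Editor's sketch (not part of this table) ---
# setup.py-style consumption of the table: turn package names back into pinned
# pip requirement strings (helper name mirrors the one used in the repo).
def deps_list(*pkgs ):
    return [deps[pkg] for pkg in pkgs]

# e.g. deps_list("torch", "numpy") -> ['torch>=1.9,!=1.12.0', 'numpy>=1.17']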
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_flip_channel_order''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
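# --- Editor's sketch (not part of the original tests) ---
# do_flip_channel_order swaps RGB to BGR by reversing the channel axis; a
# minimal channels-first illustration of what the processor applies:
def _flip_channel_order_demo(image: np.ndarray ) -> np.ndarray:
    return image[::-1, :, :]  # (C, H, W) -> channels reversed

# e.g. _flip_channel_order_demo(rgb)[0] is the original blue channel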
| 24 | 1 |
'''simple docstring'''
import os
def solution():
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
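# --- Editor's sketch (not part of the original solution) ---
# The four scans above can be collapsed into one loop over direction vectors;
# a compact equivalent for the same square grid:
def _max_product(grid , run=4 ):
    best = 0
    directions = [(0, 1), (1, 0), (1, 1), (1, -1)]  # right, down, diag, anti-diag
    n = len(grid )
    for i in range(n ):
        for j in range(n ):
            for di, dj in directions:
                ei , ej = i + di * (run - 1), j + dj * (run - 1)
                if 0 <= ei < n and 0 <= ej < n:
                    p = 1
                    for s in range(run ):
                        p *= grid[i + di * s][j + dj * s]
                    best = max(best , p )
    return best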
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader( AbstractDatasetReader):
    def __init__( self , df: "pyspark.sql.DataFrame" , split: Optional[NamedSplit] = None , features: Optional[Features] = None , streaming: bool = True , cache_dir: str = None , keep_in_memory: bool = False , working_dir: str = None , load_from_cache_file: bool = True , file_format: str = "arrow" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
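# --- Editor's sketch (not part of the original module): typical use ---
# Assumes an active SparkSession; the DataFrame contents are invented.
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()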
| 24 | 1 |
'''simple docstring'''
UpperCAmelCase_ : str = range(2, 2_0 + 1)
UpperCAmelCase_ : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Dict )-> str:
'''simple docstring'''
__snake_case = sum(a_i[j] for j in range(_lowerCamelCase , len(_lowerCamelCase ) ) )
__snake_case = sum(a_i[j] * base[j] for j in range(min(len(_lowerCamelCase ) , _lowerCamelCase ) ) )
__snake_case , __snake_case = 0, 0
__snake_case = n - i
__snake_case = memo.get(_lowerCamelCase )
if sub_memo is not None:
__snake_case = sub_memo.get(_lowerCamelCase )
if jumps is not None and len(_lowerCamelCase ) > 0:
# find and make the largest jump without going over
__snake_case = -1
for _k in range(len(_lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__snake_case = _k
break
if max_jump >= 0:
__snake_case , __snake_case , __snake_case = jumps[max_jump]
# since the difference between jumps is cached, add c
__snake_case = diff + c
for j in range(min(_lowerCamelCase , len(_lowerCamelCase ) ) ):
__snake_case , __snake_case = divmod(_lowerCamelCase , 10 )
if new_c > 0:
add(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case = []
else:
__snake_case = {c: []}
__snake_case = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__snake_case , __snake_case = next_term(_lowerCamelCase , k - 1 , i + dn , _lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__snake_case , __snake_case = compute(_lowerCamelCase , _lowerCamelCase , i + dn , _lowerCamelCase )
diff += _diff
dn += terms_jumped
__snake_case = sub_memo[c]
# keep jumps sorted by # of terms skipped
__snake_case = 0
while j < len(_lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict )-> Any:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(_lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__snake_case = i
__snake_case , __snake_case , __snake_case = 0, 0, 0
for j in range(len(_lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__snake_case = ds_c + ds_b
diff += addend
__snake_case = 0
for j in range(_lowerCamelCase ):
__snake_case = a_i[j] + addend
__snake_case , __snake_case = divmod(_lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return diff, i - start_i
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any )-> str:
'''simple docstring'''
for j in range(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case = digits[j] + addend
if s >= 10:
__snake_case , __snake_case = divmod(_lowerCamelCase , 10 )
__snake_case = addend // 10 + quotient
else:
__snake_case = s
__snake_case = addend // 10
if addend == 0:
break
while addend > 0:
__snake_case , __snake_case = divmod(_lowerCamelCase , 10 )
digits.append(_lowerCamelCase )
def solution(n: int = 10**15 ) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
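# --- Editor's sketch (not part of the original solution) ---
# A brute-force reference for small n, useful for sanity-checking the memoized
# jumping logic above: a(1) = 1 and a(k+1) = a(k) + digitsum(a(k)).
def _brute_force(n: int ) -> int:
    a = 1
    for _ in range(n - 1 ):
        a += sum(int(d ) for d in str(a ) )
    return a

# _brute_force(k) should agree with solution(k) for small arguments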
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16384,
}
class LEDTokenizerFast( PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
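# --- Editor's sketch (not part of the original module) ---
# How the global-attention padding above is exercised in practice; the text and
# mask values are invented for illustration.
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["long document ..."], padding="max_length", max_length=16)
#   enc["global_attention_mask"] = [[1] + [0] * 15]  # global attention on <s>
#   # re-padding a batch keeps global_attention_mask aligned, padded with -1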
| 24 | 1 |
'''simple docstring'''
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    '''simple docstring'''
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
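# --- Editor's sketch (not part of the original module): example usage ---
# Count simple paths from the top-left to the bottom-right corner, where cells
# equal to 1 are blocked.
#
#   maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   print(depth_first_search(maze, 0, 0, set()))  # -> 2 (around either side)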
| 24 |
'''simple docstring'''
from collections import deque
def tarjan(g ):
    '''simple docstring'''
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph(n , edges ):
    '''simple docstring'''
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
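
    # Extra illustrative check (added): a directed 3-cycle with a tail vertex
    # yields the tail singleton first, then the cycle.
    assert [[3], [2, 1, 0]] == tarjan(create_graph(4, [(0, 1), (1, 2), (2, 0), (2, 3)]))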
| 24 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ : List[Any] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
__lowercase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
__lowercase : Optional[str] = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''})
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
__lowercase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCAmelCase :
__lowercase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''})
__lowercase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__lowercase : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__lowercase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
__snake_case = import_module('''tasks''' )
try:
__snake_case = getattr(_lowerCamelCase , model_args.task_type )
__snake_case = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__snake_case = token_classification_task.get_labels(data_args.labels )
__snake_case = dict(enumerate(_lowerCamelCase ) )
__snake_case = len(_lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , id2label=_lowerCamelCase , label2id={label: i for i, label in enumerate(_lowerCamelCase )} , cache_dir=model_args.cache_dir , )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__snake_case = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
__snake_case = (
TokenClassificationDataset(
token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__snake_case = (
TokenClassificationDataset(
token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray ) -> Tuple[List[int], List[int]]:
__snake_case = np.argmax(_lowerCamelCase , axis=2 )
__snake_case , __snake_case = preds.shape
__snake_case = [[] for _ in range(_lowerCamelCase )]
__snake_case = [[] for _ in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
__snake_case , __snake_case = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowerCamelCase , _lowerCamelCase ),
"precision": precision_score(_lowerCamelCase , _lowerCamelCase ),
"recall": recall_score(_lowerCamelCase , _lowerCamelCase ),
"f1": fa_score(_lowerCamelCase , _lowerCamelCase ),
}
# Data collator
    __snake_case = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
__snake_case = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate()
__snake_case = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , _lowerCamelCase , _lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(_lowerCamelCase )
# Predict
if training_args.do_predict:
__snake_case = TokenClassificationDataset(
token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__snake_case , __snake_case , __snake_case = trainer.predict(_lowerCamelCase )
__snake_case , __snake_case = align_predictions(_lowerCamelCase , _lowerCamelCase )
__snake_case = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , _lowerCamelCase , _lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
__snake_case = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return results
def _UpperCamelCase (_lowerCamelCase : Tuple )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
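
# Example invocation (script name and paths are hypothetical; the flags map to
# the dataclasses defined above):
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner-output \
#       --max_seq_length 128 --do_train --do_eval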
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
def infix_2_postfix(infix):
    """Convert an infix expression to postfix, printing a step-by-step trace table."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert infix to prefix: reverse the input, convert to postfix, reverse back."""
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
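
    # Illustrative self-check (added; also prints the conversion trace tables):
    assert infix_2_postfix("a+b*c^d") == "abcd^*+"
    assert infix_2_prefix("a+b*c^d") == "+a*b^cd"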
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of Project Euler problem 43:
    each 3-digit substring d(i)d(i+1)d(i+2) for i = 2..8 must be divisible by
    the corresponding prime in (2, 3, 5, 7, 11, 13, 17)."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''Download the demo image used to sanity-check the converted model.'''
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config ):
    '''List the (old, new) key pairs mapping LAVIS checkpoint names to HF names.'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    '''Pop `old` from the dict and re-insert its value under `new`.'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    '''Merge each vision block's separate q/v biases into a single qkv bias.'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''vision_model.encoder.layers.{i}.self_attn.qkv.bias'''] = qkv_bias
def get_blip2_config(model_name , eos_token_id=None ):
    '''Build the Blip2Config corresponding to the original model name.'''
    image_size = 3_64 if '''coco''' in model_name else 2_24
    vision_config = Blip2VisionConfig(image_size=image_size ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = Blip2Config(vision_config=vision_config , text_config=text_config )

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
'''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blip2_config(model_name , eos_token_id=eos_token_id )

    hf_model = Blip2ForConditionalGeneration(config ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
    read_in_q_v_bias(state_dict , config )

    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
    processor = Blip2Processor(image_processor=_lowerCamelCase , tokenizer=tokenizer )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
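
    # Example invocation (script name and paths are hypothetical):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b-hf --push_to_hub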
| 24 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[Any]:
'''simple docstring'''
    __snake_case = boto3.client('''iam''' )
__snake_case = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=_lowerCamelCase , AssumeRolePolicyDocument=json.dumps(_lowerCamelCase , indent=2 ) )
__snake_case = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=_lowerCamelCase , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(_lowerCamelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def _UpperCamelCase (_lowerCamelCase : Any )-> Any:
'''simple docstring'''
    __snake_case = boto3.client('''iam''' )
return iam_client.get_role(RoleName=_lowerCamelCase )["Role"]["Arn"]
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , _lowerCamelCase , )
__snake_case = None
if credentials_configuration == 0:
__snake_case = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
__snake_case = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
__snake_case = _ask_field('''AWS Access Key ID: ''' )
__snake_case = aws_access_key_id
__snake_case = _ask_field('''AWS Secret Access Key: ''' )
__snake_case = aws_secret_access_key
__snake_case = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
__snake_case = aws_region
__snake_case = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , _lowerCamelCase , )
if role_management == 0:
__snake_case = _ask_field('''Enter your IAM role name: ''' )
else:
__snake_case = '''accelerate_sagemaker_execution_role'''
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(_lowerCamelCase )
__snake_case = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
__snake_case = None
if is_custom_docker_image:
__snake_case = _ask_field('''Enter your Docker image: ''' , lambda _lowerCamelCase : str(_lowerCamelCase ).lower() )
__snake_case = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
__snake_case = None
if is_sagemaker_inputs_enabled:
__snake_case = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda _lowerCamelCase : str(_lowerCamelCase ).lower() , )
__snake_case = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
__snake_case = None
if is_sagemaker_metrics_enabled:
__snake_case = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda _lowerCamelCase : str(_lowerCamelCase ).lower() , )
__snake_case = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
__snake_case = {}
__snake_case = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
if use_dynamo:
__snake_case = '''dynamo_'''
__snake_case = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__snake_case = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
if use_custom_options:
__snake_case = _ask_options(
'''Which mode do you want to use?''' , _lowerCamelCase , lambda _lowerCamelCase : TORCH_DYNAMO_MODES[int(_lowerCamelCase )] , default='''default''' , )
__snake_case = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
__snake_case = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='''Please enter yes or no.''' , )
    ec2_instance_query = '''Which EC2 instance type you want to use for your training?'''
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda x : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda x : str(x ).lower() , default='''ml.p3.2xlarge''' )
__snake_case = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__snake_case = _ask_field(
'''How many machines do you want use? [1]: ''' , _lowerCamelCase , default=1 , )
__snake_case = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
        image_uri=_lowerCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_lowerCamelCase , use_cpu=_lowerCamelCase , dynamo_config=_lowerCamelCase , ec2_instance_type=ec2_instance_type , profile=_lowerCamelCase , region=_lowerCamelCase , iam_role_name=_lowerCamelCase , mixed_precision=_lowerCamelCase , num_machines=_lowerCamelCase , sagemaker_inputs_file=_lowerCamelCase , sagemaker_metrics_file=_lowerCamelCase , )
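
# Sketch of what this wizard ultimately returns (field values illustrative):
# SageMakerConfig(
#     compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#     distributed_type=SageMakerDistributedType.DATA_PARALLEL,
#     ec2_instance_type="ml.p3.2xlarge",
#     num_machines=1,
#     mixed_precision="fp16",
#     region="us-east-1",
#     iam_role_name="accelerate_sagemaker_execution_role",
# )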
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
        __snake_case = np.array(_lowerCamelCase ).astype(np.float32 ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
        __snake_case = mask.astype(np.float32 ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
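
# Minimal usage sketch (added; the checkpoint name and file paths are
# assumptions based on the public RePaint example):
if __name__ == "__main__":
    from diffusers import RePaintPipeline, RePaintScheduler

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
    output = pipe(
        image=PIL.Image.open("original.png"),  # image to inpaint (assumed path)
        mask_image=PIL.Image.open("mask.png"),  # 0 = repaint, 1 = keep
        num_inference_steps=250,
        jump_length=10,
        jump_n_sample=10,
    )
    output.images[0].save("inpainted.png")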
| 24 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(
__lowerCAmelCase , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
if self.framework == "tf":
__snake_case = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__snake_case = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
__snake_case = self.get_masked_index(__SCREAMING_SNAKE_CASE )
__snake_case = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
'''simple docstring'''
if return_tensors is None:
__snake_case = self.framework
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
return model_inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__snake_case = self.model(**__SCREAMING_SNAKE_CASE )
__snake_case = model_inputs['''input_ids''']
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
__snake_case = target_ids.shape[0]
__snake_case = model_outputs['''input_ids'''][0]
__snake_case = model_outputs['''logits''']
if self.framework == "tf":
__snake_case = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__snake_case = outputs.numpy()
__snake_case = outputs[0, masked_index, :]
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
__snake_case = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
__snake_case = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
__snake_case = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = topk.values.numpy(), topk.indices.numpy()
else:
__snake_case = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__snake_case = outputs[0, masked_index, :]
__snake_case = logits.softmax(dim=-1 )
if target_ids is not None:
__snake_case = probs[..., target_ids]
__snake_case , __snake_case = probs.topk(__SCREAMING_SNAKE_CASE )
__snake_case = []
__snake_case = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__snake_case = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__snake_case = input_ids.numpy().copy()
if target_ids is not None:
__snake_case = target_ids[p].tolist()
__snake_case = p
# Filter padding out:
__snake_case = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__snake_case = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [targets]
try:
__snake_case = self.tokenizer.get_vocab()
except Exception:
__snake_case = {}
__snake_case = []
for target in targets:
__snake_case = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__snake_case = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__snake_case = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__snake_case = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = {}
if targets is not None:
__snake_case = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = target_ids
if top_k is not None:
__snake_case = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
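
# Usage sketch (added): distilroberta-base is the stock fill-mask default and
# uses <mask> as its mask token.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    for pred in unmasker("Paris is the <mask> of France.", top_k=3):
        print(pred["token_str"], round(pred["score"], 3))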
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
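# --- Illustrative sketch (not part of the original file) ---
# The postprocess step above is a softmax over per-label logits
# followed by a descending sort. A self-contained NumPy rendering of
# the same computation, with made-up logits:
import numpy as np

def score_candidates_sketch(logits, candidate_labels):
    exp = np.exp(logits - logits.max())  # numerically stable softmax
    scores = (exp / exp.sum()).tolist()
    return [
        {"score": s, "label": l}
        for s, l in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
    ]

print(score_candidates_sketch(np.array([2.0, 0.5, 0.1]), ["cat", "dog", "car"]))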
| 24 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : str = ['''vqvae''']
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , vqvae=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ) else 1000
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
__snake_case = steps or self.get_default_steps()
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
__snake_case = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__snake_case = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__snake_case = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__SCREAMING_SNAKE_CASE , device=self.device , )
__snake_case = noise
__snake_case = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.mel.audio_slice_to_image(__SCREAMING_SNAKE_CASE )
__snake_case = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__snake_case = (input_image / 255) * 2 - 1
__snake_case = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__snake_case = self.vqvae.encode(torch.unsqueeze(__SCREAMING_SNAKE_CASE , 0 ) ).latent_dist.sample(
generator=__SCREAMING_SNAKE_CASE )[0]
__snake_case = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__snake_case = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler.timesteps[start_step - 1] )
__snake_case = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__snake_case = int(mask_start_secs * pixels_per_second )
__snake_case = int(mask_end_secs * pixels_per_second )
__snake_case = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __SCREAMING_SNAKE_CASE ):
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
else:
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
__snake_case = self.scheduler.step(
model_output=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )['''prev_sample''']
else:
__snake_case = self.scheduler.step(
model_output=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
__snake_case = mask[:, step, :, :mask_start]
if mask_end > 0:
__snake_case = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__snake_case = 1 / self.vqvae.config.scaling_factor * images
__snake_case = self.vqvae.decode(__SCREAMING_SNAKE_CASE )['''sample''']
__snake_case = (images / 2 + 0.5).clamp(0 , 1 )
__snake_case = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__snake_case = (images * 255).round().astype('''uint8''' )
__snake_case = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
__snake_case = [self.mel.image_to_audio(__SCREAMING_SNAKE_CASE ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) , **ImagePipelineOutput(__SCREAMING_SNAKE_CASE ) )
@torch.no_grad()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
__snake_case = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__snake_case = (sample / 255) * 2 - 1
__snake_case = torch.Tensor(__SCREAMING_SNAKE_CASE ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__snake_case = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__snake_case = self.scheduler.alphas_cumprod[t]
__snake_case = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__snake_case = 1 - alpha_prod_t
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
__snake_case = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__snake_case = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__snake_case = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCAmelCase ( xa , xb , alpha ) -> torch.Tensor:
'''simple docstring'''
theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
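# --- Illustrative sketch (not part of the original file) ---
# Quick numerical check of the spherical interpolation above: for
# non-parallel inputs, alpha=0 returns the first tensor, alpha=1 the
# second, and values in between follow the arc rather than the chord.
import torch
from math import acos, sin

def slerp_sketch(x0, x1, alpha):
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)

a, b = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
print(slerp_sketch(a, b, 0.5))  # ~[0.7071, 0.7071]: halfway along the arc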
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
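# --- Illustrative sketch (not part of the original file) ---
# The module above follows the usual optional-dependency pattern:
# probe for the backend and only register the torch-only symbols when
# it is importable. A generic, standard-library-only version of the
# same idea (`MyConfig` and `MyModel` are hypothetical placeholders):
import importlib.util

def is_backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

demo_import_structure = {"configuration": ["MyConfig"]}
if is_backend_available("torch"):
    demo_import_structure["modeling"] = ["MyModel"]  # torch-only symbols
print(demo_import_structure)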
| 24 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : Optional[Any] = '''nat'''
__lowercase : Optional[int] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = kernel_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = layer_norm_eps
__snake_case = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = layer_scale_init_value
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
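# --- Illustrative sketch (not part of the original file) ---
# Worked example of the hidden-size rule above: the channel count
# doubles at each stage, so with embed_dim=64 and four stages the
# final dimension is 64 * 2 ** 3 = 512.
embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(hidden_size)  # 512
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']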
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
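# --- Illustrative sketch (not part of the original file) ---
# The three digit-sum variants above agree on any integer; a compact
# standalone cross-check of the iterative, recursive, and string forms:
def digit_sum_iter(n):
    n, res = abs(n), 0
    while n > 0:
        res += n % 10
        n //= 10
    return res

def digit_sum_rec(n):
    n = abs(n)
    return n if n < 10 else n % 10 + digit_sum_rec(n // 10)

def digit_sum_str(n):
    return sum(int(c) for c in str(abs(n)))

for value in (0, 262144, -987654321):
    assert digit_sum_iter(value) == digit_sum_rec(value) == digit_sum_str(value)
print(digit_sum_str(262144))  # 19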
| 24 | 1 |
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
UpperCAmelCase_ : Any = {
'''n_samples''': 6_4,
'''horizon''': 3_2,
'''num_inference_steps''': 2_0,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
UpperCAmelCase_ : int = '''hopper-medium-v2'''
UpperCAmelCase_ : Optional[Any] = gym.make(env_name)
UpperCAmelCase_ : Tuple = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
UpperCAmelCase_ : Union[str, Any] = env.reset()
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : str = 1_0_0_0
UpperCAmelCase_ : Any = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
UpperCAmelCase_ : Optional[int] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = env.step(denorm_actions)
UpperCAmelCase_ : Union[str, Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
UpperCAmelCase_ : List[Any] = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
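# --- Illustrative sketch (not part of the original file) ---
# The converter above is the shunting-yard algorithm with table
# printing interleaved. Stripped to its core (same operators and
# precedences, with an extra guard so "(" is never looked up in the
# priority table):
PRIORITY = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}

def to_postfix_sketch(infix: str) -> str:
    stack, out = [], []
    for x in infix:
        if x.isalnum():
            out.append(x)
        elif x == "(":
            stack.append(x)
        elif x == ")":
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()  # discard the "("
        else:
            while stack and stack[-1] != "(" and PRIORITY[x] <= PRIORITY[stack[-1]]:
                out.append(stack.pop())
            stack.append(x)
    out.extend(reversed(stack))
    return "".join(out)

print(to_postfix_sketch("a+b*(c^d-e)"))  # abcd^e-*+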
| 24 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : int = '''marian'''
__lowercase : Any = ['''past_key_values''']
__lowercase : List[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , __SCREAMING_SNAKE_CASE=5_8101 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=4096 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=4096 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=5_8100 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=5_8100 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
__snake_case = vocab_size
__snake_case = decoder_vocab_size or vocab_size
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = use_cache
__snake_case = encoder_layers
__snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
class lowerCAmelCase ( __lowerCAmelCase):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__snake_case = {0: '''batch'''}
__snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__snake_case = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__snake_case , __snake_case = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__snake_case = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super().outputs
else:
__snake_case = super(__SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
__snake_case , __snake_case = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Generate decoder inputs
__snake_case = seq_length if not self.use_past else 1
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__snake_case = dict(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__snake_case , __snake_case = common_inputs['''input_ids'''].shape
__snake_case = common_inputs['''decoder_input_ids'''].shape[1]
__snake_case , __snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = decoder_seq_length + 3
__snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__snake_case = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , dim=1 )
__snake_case = []
# If the numbers of encoder and decoder layers are present in the model configuration, both are considered
__snake_case , __snake_case = self.num_layers
__snake_case = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - min_num_layers
__snake_case = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
__snake_case = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) )
return common_inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__snake_case , __snake_case = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case , __snake_case = self.num_layers
__snake_case , __snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = common_inputs['''attention_mask'''].dtype
__snake_case = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
__snake_case = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE )
]
return common_inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
__snake_case = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__snake_case = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return common_inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
else:
__snake_case = self._generate_dummy_inputs_for_causal_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return common_inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
__snake_case = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
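# --- Illustrative sketch (not part of the original file) ---
# Worked example of the past_key_values dummies built above: each
# cached key/value tensor has shape (batch, num_heads, cached_length,
# hidden_size // num_heads). Toy numbers; the layer count of 12 is an
# arbitrary choice for the demo:
import torch

batch, num_heads, hidden_size = 2, 16, 1024
decoder_seq_length = 5
decoder_past_length = decoder_seq_length + 3  # as in the method above
shape = (batch, num_heads, decoder_past_length, hidden_size // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(12)]
print(shape)                 # (2, 16, 8, 64)
print(len(past_key_values))  # one (key, value) pair per decoder layer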
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
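# --- Illustrative sketch (not part of the original file) ---
# The ONNX config above only declares which axes of pixel_values are
# dynamic. The same mapping, standalone; the exporter substitutes
# these names for fixed sizes so one graph serves any batch size and
# resolution:
from collections import OrderedDict

onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
print(onnx_inputs["pixel_values"])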
| 24 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
UpperCAmelCase_ : Any = (7_2_0, 1_2_8_0) # Height, Width
UpperCAmelCase_ : List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
UpperCAmelCase_ : Tuple = 1 / 1_0_0
UpperCAmelCase_ : Dict = ''''''
UpperCAmelCase_ : Tuple = ''''''
UpperCAmelCase_ : List[Any] = ''''''
UpperCAmelCase_ : Optional[Any] = 2_5_0
def _UpperCamelCase ()-> None:
'''simple docstring'''
__snake_case , __snake_case = get_dataset(_lowerCamelCase , _lowerCamelCase )
for index in range(_lowerCamelCase ):
__snake_case = random.sample(range(len(_lowerCamelCase ) ) , 4 )
__snake_case , __snake_case , __snake_case = update_image_and_anno(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , filter_scale=_lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__snake_case = random_chars(32 )
__snake_case = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__snake_case = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
__snake_case = []
for anno in new_annos:
__snake_case = anno[3] - anno[1]
__snake_case = anno[4] - anno[2]
__snake_case = anno[1] + width / 2
__snake_case = anno[2] + height / 2
__snake_case = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(_lowerCamelCase )
with open(f'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : str )-> tuple[list, list]:
'''simple docstring'''
__snake_case = []
__snake_case = []
for label_file in glob.glob(os.path.join(_lowerCamelCase , '''*.txt''' ) ):
__snake_case = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_lowerCamelCase ) as in_file:
__snake_case = in_file.readlines()
__snake_case = os.path.join(_lowerCamelCase , f'''{label_name}.jpg''' )
__snake_case = []
for obj_list in obj_lists:
__snake_case = obj_list.rstrip('''\n''' ).split(''' ''' )
__snake_case = float(obj[1] ) - float(obj[3] ) / 2
__snake_case = float(obj[2] ) - float(obj[4] ) / 2
__snake_case = float(obj[1] ) + float(obj[3] ) / 2
__snake_case = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def _UpperCamelCase (_lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : list[int] , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : tuple[float, float] , _lowerCamelCase : float = 0.0 , )-> tuple[list, list, str]:
'''simple docstring'''
__snake_case = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__snake_case = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__snake_case = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__snake_case = int(scale_x * output_size[1] )
__snake_case = int(scale_y * output_size[0] )
__snake_case = []
__snake_case = []
for i, index in enumerate(_lowerCamelCase ):
__snake_case = all_img_list[index]
path_list.append(_lowerCamelCase )
__snake_case = all_annos[index]
__snake_case = cva.imread(_lowerCamelCase )
if i == 0: # top-left
__snake_case = cva.resize(_lowerCamelCase , (divid_point_x, divid_point_y) )
__snake_case = img
for bbox in img_annos:
__snake_case = bbox[1] * scale_x
__snake_case = bbox[2] * scale_y
__snake_case = bbox[3] * scale_x
__snake_case = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__snake_case = cva.resize(_lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
__snake_case = img
for bbox in img_annos:
__snake_case = scale_x + bbox[1] * (1 - scale_x)
__snake_case = bbox[2] * scale_y
__snake_case = scale_x + bbox[3] * (1 - scale_x)
__snake_case = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__snake_case = cva.resize(_lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
__snake_case = img
for bbox in img_annos:
__snake_case = bbox[1] * scale_x
__snake_case = scale_y + bbox[2] * (1 - scale_y)
__snake_case = bbox[3] * scale_x
__snake_case = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__snake_case = cva.resize(
_lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__snake_case = img
for bbox in img_annos:
__snake_case = scale_x + bbox[1] * (1 - scale_x)
__snake_case = scale_y + bbox[2] * (1 - scale_y)
__snake_case = scale_x + bbox[3] * (1 - scale_x)
__snake_case = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__snake_case = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _UpperCamelCase (_lowerCamelCase : int )-> str:
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
__snake_case = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
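# --- Illustrative sketch (not part of the original file) ---
# The script above converts between YOLO-style (class, x_center,
# y_center, width, height) labels and corner (class, xmin, ymin,
# xmax, ymax) boxes in both directions. The round trip, isolated
# (values chosen as exact binary fractions so the assert is safe):
def yolo_to_corners(obj):
    cls, xc, yc, w, h = obj
    return [cls, xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2]

def corners_to_yolo(anno):
    cls, xmin, ymin, xmax, ymax = anno
    w, h = xmax - xmin, ymax - ymin
    return [cls, xmin + w / 2, ymin + h / 2, w, h]

box = [0, 0.5, 0.5, 0.25, 0.5]
assert corners_to_yolo(yolo_to_corners(box)) == box
print(yolo_to_corners(box))  # [0, 0.375, 0.25, 0.625, 0.75]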
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
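# --- Illustrative sketch (not part of the original file) ---
# The time formatter at the top of this file renders elapsed seconds
# as h:mm:ss, dropping the hour field when it is zero. A standalone
# copy with two spot checks:
def format_time_sketch(t):
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

print(format_time_sketch(3725))  # 1:02:05
print(format_time_sketch(65))    # 01:05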
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int )-> list[list[int]]:
'''simple docstring'''
__snake_case = []
create_all_state(1 , _lowerCamelCase , _lowerCamelCase , [] , _lowerCamelCase )
return result
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : list[list[int]] , )-> None:
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCamelCase , total_number - level + 2 ):
current_list.append(_lowerCamelCase )
create_all_state(i + 1 , _lowerCamelCase , level - 1 , _lowerCamelCase , _lowerCamelCase )
current_list.pop()
def _UpperCamelCase (_lowerCamelCase : list[list[int]] )-> None:
'''simple docstring'''
for i in total_list:
print(*_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : str = generate_all_combinations(n, k)
print_all_state(total_list)
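# --- Illustrative sketch (not part of the original file) ---
# The backtracking generator above enumerates the k-combinations of
# 1..n in lexicographic order, which matches itertools.combinations:
from itertools import combinations

n, k = 4, 2
expected = [list(c) for c in combinations(range(1, n + 1), k)]
print(expected)  # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]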
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda x : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float )-> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
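# --- Illustrative sketch (not part of the original file) ---
# Worked example of f = 1 / (2 * pi * sqrt(L * C)) above: with
# L = 1 mH and C = 1 uF the resonant frequency is about 5.03 kHz.
from math import pi, sqrt

inductance, capacitance = 1e-3, 1e-6  # henries, farads
frequency = 1 / (2 * pi * sqrt(inductance * capacitance))
print(f"{frequency:.1f} Hz")  # ~5032.9 Hz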
| 24 |
"""Sum of an arithmetic series: S_n = n/2 * (2a + (n - 1) * d)."""


def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
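
# Worked example (added): with first_term=1, common_diff=1 and num_of_terms=10
# the formula gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0, i.e. the sum
# 1 + 2 + ... + 10.
#
#     >>> sum_of_series(1, 1, 10)
#     55.0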
| 24 | 1 |
""" Testing suite for the PyTorch MaskFormer model. """
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 * 4 , __SCREAMING_SNAKE_CASE=32 * 6 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=32 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = is_training
__snake_case = use_auxiliary_loss
__snake_case = num_queries
__snake_case = num_channels
__snake_case = min_size
__snake_case = max_size
__snake_case = num_labels
__snake_case = mask_feature_size
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__SCREAMING_SNAKE_CASE )
__snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE )
__snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) > 0.5
).float()
__snake_case = (torch.rand((self.batch_size, self.num_labels) , device=__SCREAMING_SNAKE_CASE ) > 0.5).long()
__snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.prepare_config_and_inputs()
__snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = output.encoder_hidden_states
__snake_case = output.pixel_decoder_hidden_states
__snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Any:
'''simple docstring'''
with torch.no_grad():
__snake_case = MaskFormerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = MaskFormerForInstanceSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(__SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__snake_case = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE )
comm_check_on_output(__SCREAMING_SNAKE_CASE )
__snake_case = model(
pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE )
comm_check_on_output(__SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__lowercase : Union[str, Any] = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Any = False
__lowercase : Optional[Any] = False
__lowercase : Optional[Any] = False
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = MaskFormerModelTester(self )
__snake_case = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
__snake_case = MaskFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = (self.model_tester.min_size,) * 2
__snake_case = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__SCREAMING_SNAKE_CASE ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__SCREAMING_SNAKE_CASE ),
'''class_labels''': torch.zeros(2 , 10 , device=__SCREAMING_SNAKE_CASE ).long(),
}
__snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__SCREAMING_SNAKE_CASE )
__snake_case = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
__snake_case = model(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__snake_case = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs()
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__snake_case = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs()
__snake_case = True
__snake_case = True
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__snake_case = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE )
__snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : List[str] = 1E-4
def _UpperCamelCase ()-> str:
'''simple docstring'''
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCAmelCase ( unittest.TestCase):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__SCREAMING_SNAKE_CASE )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
__snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
__snake_case = model(**__SCREAMING_SNAKE_CASE )
__snake_case = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
__snake_case = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
__snake_case = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
__snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
__snake_case = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
__snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
__snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
__snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
__snake_case = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
__snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
__snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
__snake_case = self.default_image_processor
__snake_case = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
__snake_case = inputs['''pixel_values'''].to(__SCREAMING_SNAKE_CASE )
__snake_case = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs['''mask_labels''']]
__snake_case = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs['''class_labels''']]
with torch.no_grad():
__snake_case = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
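
# Illustrative note (added): the two helpers above derive a per-worker port for
# torch.distributed when tests run under pytest-xdist. Workers are named
# "gw<N>", so on worker "gw3" pytest_xdist_worker_id() returns 3 and
# get_torch_dist_unique_port() returns 29500 + 3 == 29503, which keeps
# concurrent distributed test processes from clashing on the same port.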
| 24 | 1 |
"""Tests for the poker hand comparison solution to Project Euler problem 54."""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand():
    # Pick two random hands from SORTED_HANDS and derive the expected result
    # from their positions (hands later in the tuple are stronger).
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Five high straights must compare as the weakest straights.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to _is_five_high_straight() must not mutate the hand.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: count how often Player 1 wins.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 24 |
"""Count the integer partitions of m with a bottom-up dynamic program."""


def partition(m: int) -> int:
    # memo[n][k] counts the partitions of n into parts of size at most k + 1.
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
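
# Worked example (added): partition(5) == 7, matching the seven partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1. The recurrence splits on
# whether a part of size k+1 is used: memo[n][k] = memo[n][k-1] + memo[n-k-1][k].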
| 24 | 1 |
"""Sort the mappings defined in the auto modules alphabetically by identifier."""
import argparse
import os
import re

PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
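
# Illustrative usage (added; the script path is an assumption based on the
# repository layout this file targets):
#
#     python utils/sort_auto_mappings.py                # rewrite the auto files in place
#     python utils/sort_auto_mappings.py --check_only   # only report unsorted mappings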
| 24 | 1 |
"""Quicksort with a random pivot to reduce the chance of hitting the O(n^2) worst case."""
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
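
# Illustrative usage (added): the sort happens in place, and `right` is an
# exclusive bound, so callers pass the full index range of the list:
#
#     >>> data = [5, 3, 8, 1, 2]
#     >>> quick_sort_random(data, 0, len(data))
#     >>> data
#     [1, 2, 3, 5, 8]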
| 24 |
"""Deprecation helper: warn about (and drain) deprecated arguments or attributes."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
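
# Illustrative usage (added; the argument names are hypothetical): a library
# function that renamed `torch_dtype` to `dtype` could drain the old kwarg with
#
#     torch_dtype = deprecate("torch_dtype", "1.0.0", "Use `dtype` instead.", take_from=kwargs)
#
# which pops `torch_dtype` from `kwargs`, emits a FutureWarning, and returns the
# popped value (or None if the caller never passed it).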
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase_ : Optional[Any] = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
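
# Illustrative note (added; assuming this is the `transformers.onnx` package
# __init__): with _LazyModule, submodules are imported only on first attribute
# access, so a sketch like
#
#     from transformers.onnx import OnnxConfig
#
# resolves only the `config` submodule and does not pay the import cost of
# `convert` or `features` until those attributes are actually touched.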
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
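# Illustrative invocation (not part of the original script; the file paths below
# are hypothetical placeholders for a downloaded checkpoint and config):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path weights/efficientformer_l1_300d.pth \
#       --config_file configs/efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub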
| 24 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
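# A minimal sketch of the same bottom-up recurrence on an in-memory triangle
# (illustrative only; `solution()` above reads its input from triangle.txt).
# Each cell accumulates the best path sum by taking the larger of its two parents.
def _max_path_sum(triangle: list) -> int:
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


assert _max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # path 3 + 7 + 4 + 9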
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_flip_channel_order'))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
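# The behaviour exercised above, as a standalone sketch (illustrative only):
#
#   processor = MobileViTImageProcessor(size={'shortest_edge': 20}, crop_size={'height': 18, 'width': 18})
#   batch = processor(images=pil_images, return_tensors='pt')
#   batch.pixel_values.shape  # (batch_size, 3, 18, 18), with channel order flipped RGB -> BGR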
| 24 | 1 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    '''simple docstring'''
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead in blocks of size sqrt(n) until reaching the block that may contain x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the identified block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F"""Number {x} is at index {res}""")
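# Illustrative usage (not part of the original script). Jump search requires a
# sorted array and needs O(sqrt(n)) comparisons:
#
#   >>> jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55)
#   9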
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        '''simple docstring'''
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
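# A minimal usage sketch, assuming a local Spark session (illustrative only; this
# reader is normally reached through `datasets.Dataset.from_spark`):
#
#   import pyspark
#   spark = pyspark.sql.SparkSession.builder.master('local[*]').getOrCreate()
#   df = spark.createDataFrame([('hello', 0), ('world', 1)], ['text', 'label'])
#   ds = SparkDatasetReader(df, streaming=False).read()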
| 24 | 1 |
'''simple docstring'''
class Node:
    def __init__(self, data, previous=None, next_node=None):
        '''simple docstring'''
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        '''simple docstring'''
        return f'{self.data}'

    def get_data(self):
        '''simple docstring'''
        return self.data

    def get_next(self):
        '''simple docstring'''
        return self.next

    def get_previous(self):
        '''simple docstring'''
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        '''simple docstring'''
        self.current = head

    def __iter__(self):
        '''simple docstring'''
        return self

    def __next__(self):
        '''simple docstring'''
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        '''simple docstring'''
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        '''simple docstring'''
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        '''simple docstring'''
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        '''simple docstring'''
        return LinkedListIterator(self.head)

    def get_head_data(self):
        '''simple docstring'''
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        '''simple docstring'''
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        '''simple docstring'''
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        '''simple docstring'''
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        '''simple docstring'''
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value):
        '''simple docstring'''
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        '''simple docstring'''
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        '''simple docstring'''
        return self.head is None
def _UpperCamelCase ()-> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
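# Illustrative usage of the list above (not part of the original file):
#
#   dll = LinkedList()
#   for value in (1, 2, 3):
#       dll.insert(value)            # 1 2 3
#   dll.insert_at_position(2, 9)     # 1 9 2 3
#   dll.delete_value(2)              # 1 9 3
#   print(dll, 2 in dll, list(dll))  # '1 9 3' False [1, 9, 3]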
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value) -> None:
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None, ) -> dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
# Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
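# Sketch of the padding behaviour implemented in _pad above (illustrative; the
# checkpoint name is an assumption): `global_attention_mask` is padded with -1,
# so padded positions keep local attention rather than global attention.
#
#   tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
#   enc = tok(['short', 'a slightly longer input'])
#   enc['global_attention_mask'] = [[1] + [0] * (len(ids) - 1) for ids in enc['input_ids']]
#   padded = tok.pad(enc, padding='longest')  # shorter rows get -1 appended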
| 24 | 1 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    # R(k + 1) = (10 * R(k) + 1) mod divisor; stop once the repunit is divisible.
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
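# Worked example (illustrative): for divisor 7 the repunits are 1, 11, 111, ... and
# 111111 = 7 * 15873 is the first one divisible by 7, so least_divisible_repunit(7) == 6.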
| 24 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    '''simple docstring'''
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    '''simple docstring'''
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
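    # For this graph the components come out in reverse topological order:
    # {5}, {6}, {4} and {3, 2, 1, 0}. Tarjan's algorithm runs in O(V + E).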
| 24 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    '''simple docstring'''
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        '''simple docstring'''
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})
        code = 'x = y'
        state = {'y': 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 5, 'y': 5})

    def test_evaluate_call(self):
        '''simple docstring'''
        code = 'y = add_two(x)'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        '''simple docstring'''
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})

    def test_evaluate_dict(self):
        '''simple docstring'''
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertDictEqual(result, {'x': 3, 'y': 5})
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})

    def test_evaluate_expression(self):
        '''simple docstring'''
        code = 'x = 3\ny = 5'
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})

    def test_evaluate_f_string(self):
        '''simple docstring'''
        code = "text = f'This is x: {x}.'"
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 'This is x: 3.'
        self.assertDictEqual(state, {'x': 3, 'text': 'This is x: 3.'})

    def test_evaluate_if(self):
        '''simple docstring'''
        code = 'if x <= 3:\n    y = 2\nelse:\n    y = 5'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {'x': 3, 'y': 2})
        state = {'x': 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 8, 'y': 5})

    def test_evaluate_list(self):
        '''simple docstring'''
        code = 'test_list = [x, add_two(x)]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})

    def test_evaluate_name(self):
        '''simple docstring'''
        code = 'y = x'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3, 'y': 3})

    def test_evaluate_subscript(self):
        '''simple docstring'''
        code = 'test_list = [x, add_two(x)]\ntest_list[1]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})

    def test_evaluate_for(self):
        '''simple docstring'''
        code = 'x = 0\nfor i in range(3):\n    x = i'
        state = {}
        result = evaluate(code, {'range': range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {'x': 2, 'i': 2})
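# Minimal illustration of the interpreter under test (assumes the same `evaluate`
# import as above; not part of the original test file):
#
#   state = {}
#   evaluate('x = 1\ny = x + 1', {}, state=state)  # returns 2; state == {'x': 1, 'y': 2}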
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 10_1122)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
    def test_prepare_batch(self):
        '''simple docstring'''
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
        expected_encoding = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='moussaKam/mbarthez', revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6', sequences=sequences, )
| 24 | 1 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    '''simple docstring'''
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}'
def html_progress_bar(value, total, prefix, label, width=300):
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table(items):
    '''simple docstring'''
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f'{elt:.6f}' if isinstance(elt, float) else str(elt)
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
    def __init__(self, total, prefix=None, leave=True, parent=None, width=300, ):
        '''simple docstring'''
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        '''simple docstring'''
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        '''simple docstring'''
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f'[{spaced_value}/{self.total} : < :'
        elif self.predicted_remaining is None:
            self.label = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'
        else:
            self.label = (
                f'[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'
                f' {format_time(self.predicted_remaining)}'
            )
            self.label += f', {1/self.average_time_per_item:.2f} it/s'
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f', {self.comment}]'
        self.display()
    def display(self):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code ) )
    def close(self):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        '''simple docstring'''
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display(self):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code ) )
    def write_line(self, values):
        '''simple docstring'''
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])
    def add_child(self, total, prefix=None, width=300):
        '''simple docstring'''
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar
    def remove_child(self):
        '''simple docstring'''
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        '''simple docstring'''
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        '''simple docstring'''
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        '''simple docstring'''
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f'{state.epoch:.2f}'
        self.training_tracker.update(
            state.global_step + 1, comment=f'Epoch {epoch}/{state.num_train_epochs}', force_update=self._force_next_update, )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        '''simple docstring'''
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_predict(self, args, state, control, **kwargs):
        '''simple docstring'''
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        '''simple docstring'''
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        '''simple docstring'''
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(r'\_loss$', '', k)
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(f'{metric_key_prefix}_runtime', None)
            _ = metrics.pop(f'{metric_key_prefix}_samples_per_second', None)
            _ = metrics.pop(f'{metric_key_prefix}_steps_per_second', None)
            _ = metrics.pop(f'{metric_key_prefix}_jit_compilation_time', None)
            for k, v in metrics.items():
                if k == f'{metric_key_prefix}_loss':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        '''simple docstring'''
        self.training_tracker.update(
            state.global_step, comment=f'Epoch {int(state.epoch)}/{state.num_train_epochs}', force_update=True)
        self.training_tracker = None
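# Attaching the callback to a Trainer is a one-liner (illustrative sketch; in
# practice this callback is enabled automatically when running inside a notebook):
#
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args, callbacks=[NotebookProgressCallback()])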
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 24 | 1 |
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    '''simple docstring'''
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
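# Illustrative usage (not part of the original file): index k selects the k-th
# smallest element, so the median of an odd-length list sits at len(items) // 2.
if __name__ == "__main__":
    example = [2, 8, 1, 9, 7, 5, 3]
    assert quick_select(example, 0) == 1
    assert quick_select(example, len(example) // 2) == 5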
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    '''simple docstring'''
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        __snake_case = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
        assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 model checkpoint to convert.''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = '''gpt_neo'''
__lowercase : Optional[int] = ['''past_key_values''']
__lowercase : List[str] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , __SCREAMING_SNAKE_CASE=5_0257 , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=5_0256 , __SCREAMING_SNAKE_CASE=5_0256 , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = num_layers
__snake_case = num_heads
__snake_case = intermediate_size
__snake_case = window_size
__snake_case = activation_function
__snake_case = resid_dropout
__snake_case = embed_dropout
__snake_case = attention_dropout
__snake_case = classifier_dropout
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = use_cache
__snake_case = bos_token_id
__snake_case = eos_token_id
__snake_case = attention_types
__snake_case = self.expand_attention_types_params(__SCREAMING_SNAKE_CASE )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
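# A minimal sketch of the expansion above: the compact `attention_types` spec
# [[["global", "local"], 12]] repeats the two-element pattern 12 times, giving one
# attention kind per layer for a 24-layer model:
_spec = [[["global", "local"], 12]]
_expanded = []
for _item in _spec:
    for _ in range(_item[1]):
        _expanded.extend(_item[0])
assert len(_expanded) == 24 and _expanded[:2] == ["global", "local"]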
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] )-> Tuple:
'''simple docstring'''
import torch
__snake_case = input.size()
__snake_case = len(_lowerCamelCase )
__snake_case = shape[dimension]
__snake_case = torch.arange(0 , _lowerCamelCase , _lowerCamelCase )
__snake_case = torch.div(sizedim - size , _lowerCamelCase , rounding_mode='''floor''' ) + 1
__snake_case = torch.arange(_lowerCamelCase ) + low_indices[:min_length][:, None]
__snake_case = [slice(_lowerCamelCase )] * rank
__snake_case = indices
__snake_case = input[s]
__snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_lowerCamelCase )
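# A minimal sketch of the windowing helper above: it slices one dimension into
# overlapping windows of `size` every `step` elements and moves the window axis to
# the end, which is what `torch.Tensor.unfold` does natively:
import torch as _torch

_windows = _torch.arange(10).unfold(0, 4, 2)  # windows of length 4, stride 2
assert _windows.shape == (4, 4) and _windows[1].tolist() == [2, 3, 4, 5]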
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : str )-> str:
'''simple docstring'''
import torch
__snake_case = torch.arange(1 , _lowerCamelCase )
__snake_case = torch.remainder(_lowerCamelCase , _lowerCamelCase )
__snake_case = remainders == 0
__snake_case = candidates[divisor_indices]
__snake_case = torch.max(_lowerCamelCase )
return largest_divisor, torch.div(_lowerCamelCase , _lowerCamelCase , rounding_mode='''floor''' )
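# A minimal sketch of the divisor search above, assuming the two arguments are a
# sequence length and a window size: keep the candidates that divide the length
# evenly and take the largest one.
import torch as _torch

_seq_len, _window = 12, 5
_candidates = _torch.arange(1, _window)  # tensor([1, 2, 3, 4])
_divisors = _candidates[_torch.remainder(_torch.tensor(_seq_len), _candidates) == 0]
assert int(_divisors.max()) == 4 and _seq_len // 4 == 3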
class lowerCAmelCase ( __lowerCAmelCase):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__snake_case = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
__snake_case = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return self._config.num_heads
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
__snake_case = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__snake_case , __snake_case = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__snake_case = common_inputs['''attention_mask''']
if self.use_past:
__snake_case = ordered_inputs['''attention_mask'''].dtype
__snake_case = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return 13
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
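# A minimal sketch of the image path above: resize to a multiple of 8, stack to
# NCHW, and rescale [0, 255] pixels to [-1, 1]. Tiny synthetic image:
import numpy as _np
import PIL.Image as _Image
import torch as _torch

_img = _Image.new("RGB", (9, 9), color=(255, 255, 255))
_w, _h = (_s - _s % 8 for _s in _img.size)  # 9 -> 8
_arr = _np.array(_img.resize((_w, _h)), dtype=_np.float32) / 255.0
_tensor = _torch.from_numpy(2.0 * _arr - 1.0).permute(2, 0, 1)  # HWC -> CHW
assert _tensor.shape == (3, 8, 8) and float(_tensor.max()) == 1.0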
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
        # set step values (args: num_inference_steps, jump_length, jump_n_sample)
        self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 24 | 1 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class lowerCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = None , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
__snake_case = initial_learning_rate
__snake_case = warmup_steps
__snake_case = power
__snake_case = decay_schedule_fn
__snake_case = name
def __call__( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__snake_case = tf.cast(__SCREAMING_SNAKE_CASE , tf.floataa )
__snake_case = tf.cast(self.warmup_steps , tf.floataa )
__snake_case = global_step_float / warmup_steps_float
__snake_case = self.initial_learning_rate * tf.math.pow(__SCREAMING_SNAKE_CASE , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
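# A minimal sketch of the warmup branch above: below `warmup_steps` the schedule
# returns init_lr * (step / warmup_steps) ** power before handing off to the
# wrapped decay schedule. The warmup curve in plain Python:
_init_lr, _warmup_steps, _power = 1e-3, 4, 1.0
_warmup_lrs = [_init_lr * (s / _warmup_steps) ** _power for s in range(1, _warmup_steps + 1)]
assert _warmup_lrs[0] == 2.5e-4 and _warmup_lrs[-1] == _init_lr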
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.0 , _lowerCamelCase : float = 0.9 , _lowerCamelCase : float = 0.999 , _lowerCamelCase : float = 1E-8 , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : float = 0.0 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : Optional[List[str]] = None , )-> List[str]:
'''simple docstring'''
__snake_case = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowerCamelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCamelCase , )
if num_warmup_steps:
__snake_case = WarmUp(
initial_learning_rate=_lowerCamelCase , decay_schedule_fn=_lowerCamelCase , warmup_steps=_lowerCamelCase , )
if weight_decay_rate > 0.0:
__snake_case = AdamWeightDecay(
learning_rate=_lowerCamelCase , weight_decay_rate=_lowerCamelCase , beta_a=_lowerCamelCase , beta_a=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_lowerCamelCase , )
else:
__snake_case = tf.keras.optimizers.Adam(
learning_rate=_lowerCamelCase , beta_a=_lowerCamelCase , beta_a=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE = 0.001 , __SCREAMING_SNAKE_CASE = 0.9 , __SCREAMING_SNAKE_CASE = 0.999 , __SCREAMING_SNAKE_CASE = 1E-7 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "AdamWeightDecay" , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = weight_decay_rate
__snake_case = include_in_weight_decay
__snake_case = exclude_from_weight_decay
@classmethod
def lowerCAmelCase ( cls , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = {'''WarmUp''': WarmUp}
return super(__SCREAMING_SNAKE_CASE , cls ).from_config(__SCREAMING_SNAKE_CASE , custom_objects=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE , self )._prepare_local(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case = list(zip(*__SCREAMING_SNAKE_CASE ) )
return super(__SCREAMING_SNAKE_CASE , self ).apply_gradients(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , name=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__snake_case = apply_state or {}
__snake_case = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__snake_case = self._fallback_apply_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case = self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE )
__snake_case = self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tf.control_dependencies([decay] ):
return super(__SCREAMING_SNAKE_CASE , self )._resource_apply_dense(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case = self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE )
__snake_case = self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tf.control_dependencies([decay] ):
return super(__SCREAMING_SNAKE_CASE , self )._resource_apply_sparse(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is not None:
return False
return True
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> int:
'''simple docstring'''
__snake_case = []
__snake_case = None
@property
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if self._accum_steps is None:
__snake_case = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__SCREAMING_SNAKE_CASE , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if not self._gradients:
__snake_case = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__SCREAMING_SNAKE_CASE ) , trainable=__SCREAMING_SNAKE_CASE , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__SCREAMING_SNAKE_CASE ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(__SCREAMING_SNAKE_CASE )}''' )
for accum_gradient, gradient in zip(self._gradients , __SCREAMING_SNAKE_CASE ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__SCREAMING_SNAKE_CASE )
self._accum_steps.assign_add(1 )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__SCREAMING_SNAKE_CASE ) )
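# A minimal sketch of the accumulation bookkeeping above: sum per-variable
# gradients across micro-batches and count steps; training code would scale the
# sums once per effective batch, apply them, then reset. Plain Python stand-in:
_accum, _steps = [0.0, 0.0], 0
for _micro_grads in ([1.0, -2.0], [3.0, 4.0]):
    _accum = [a + g for a, g in zip(_accum, _micro_grads)]
    _steps += 1
_avg = [a / _steps for a in _accum]  # scale before applying to the optimizer
assert _steps == 2 and _avg == [2.0, 1.0]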
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
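# A minimal sketch of the postprocessing above: softmax the per-label image-text
# logits and return labels sorted by descending score. Toy numbers:
import math as _math

_labels, _logits = ["cat", "dog"], [2.0, 0.0]
_exp = [_math.exp(v) for v in _logits]
_scores = [e / sum(_exp) for e in _exp]
_ranked = sorted(zip(_scores, _labels), key=lambda x: -x[0])
assert _ranked[0][1] == "cat" and abs(sum(_scores) - 1.0) < 1e-9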
| 24 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
UpperCAmelCase_ : Union[str, Any] = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, '''rb''') as fp:
UpperCAmelCase_ : Optional[int] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
UpperCAmelCase_ : Union[str, Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCAmelCase_ : Dict = [0] * args.vocab_size
for k, v in counter.items():
UpperCAmelCase_ : str = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[Any] = '''umt5'''
__lowercase : Union[str, Any] = ['''past_key_values''']
def __init__( self , __SCREAMING_SNAKE_CASE=25_0112 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gated-gelu" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="T5Tokenizer" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
is_encoder_decoder=__SCREAMING_SNAKE_CASE , tokenizer_class=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = vocab_size
__snake_case = d_model
__snake_case = d_kv
__snake_case = d_ff
__snake_case = num_layers
__snake_case = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__snake_case = num_heads
__snake_case = relative_attention_num_buckets
__snake_case = relative_attention_max_distance
__snake_case = dropout_rate
__snake_case = layer_norm_epsilon
__snake_case = initializer_factor
__snake_case = feed_forward_proj
__snake_case = use_cache
__snake_case = self.feed_forward_proj.split('''-''' )
__snake_case = act_info[-1]
__snake_case = act_info[0] == '''gated'''
if len(__SCREAMING_SNAKE_CASE ) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
__snake_case = '''gelu_new'''
@property
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return self.d_model
@property
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.num_heads
@property
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return self.num_layers
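# A minimal sketch of the activation parsing in __init__ above: splitting on "-"
# turns "gated-gelu" into a gated block with the "gelu" activation (later mapped
# to "gelu_new"), while a bare "relu" stays ungated:
_act_info = "gated-gelu".split("-")
_dense_act_fn, _is_gated = _act_info[-1], _act_info[0] == "gated"
assert (_dense_act_fn, _is_gated) == ("gelu", True)
assert "relu".split("-")[0] != "gated"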
class lowerCAmelCase ( __lowerCAmelCase):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__snake_case = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
__snake_case = '''past_encoder_sequence + sequence'''
__snake_case = {0: '''batch'''}
__snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return 13
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 5E-4
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
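# All three variants above compute the decimal digit sum of |n|; the compact
# string form is the easiest to sanity-check:
assert sum(int(c) for c in str(abs(-12345))) == 15
assert sum(int(c) for c in str(abs(0))) == 0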
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Tuple )-> Union[str, Any]:
'''simple docstring'''
__snake_case = StableDiffusionPipeline.from_pretrained(_lowerCamelCase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__snake_case = load_file(_lowerCamelCase )
__snake_case = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__snake_case = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
__snake_case = pipeline.text_encoder
else:
__snake_case = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
__snake_case = pipeline.unet
# find the target layer
__snake_case = layer_infos.pop(0 )
while len(_lowerCamelCase ) > -1:
try:
__snake_case = curr_layer.__getattr__(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__snake_case = layer_infos.pop(0 )
elif len(_lowerCamelCase ) == 0:
break
except Exception:
if len(_lowerCamelCase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__snake_case = layer_infos.pop(0 )
__snake_case = []
if "lora_down" in key:
pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
pair_keys.append(_lowerCamelCase )
else:
pair_keys.append(_lowerCamelCase )
pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
__snake_case = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__snake_case = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_lowerCamelCase , _lowerCamelCase ).unsqueeze(2 ).unsqueeze(3 )
else:
__snake_case = state_dict[pair_keys[0]].to(torch.floataa )
__snake_case = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_lowerCamelCase , _lowerCamelCase )
# update visited list
for item in pair_keys:
visited.append(_lowerCamelCase )
return pipeline
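# A minimal sketch of the merge rule above: each LoRA pair is folded into its base
# weight as W += alpha * (up @ down). Rank-1 toy matrices, assuming torch:
import torch as _torch

_W = _torch.zeros(4, 4)
_up, _down = _torch.ones(4, 1), _torch.ones(1, 4)  # rank-1 update
_W += 0.75 * _torch.mm(_up, _down)
assert float(_W[0, 0]) == 0.75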
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
UpperCAmelCase_ : List[str] = parser.parse_args()
UpperCAmelCase_ : int = args.base_model_path
UpperCAmelCase_ : Dict = args.checkpoint_path
UpperCAmelCase_ : Dict = args.dump_path
UpperCAmelCase_ : str = args.lora_prefix_unet
UpperCAmelCase_ : Dict = args.lora_prefix_text_encoder
UpperCAmelCase_ : Dict = args.alpha
UpperCAmelCase_ : Optional[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
UpperCAmelCase_ : Dict = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCAmelCase_ : List[Any] = 5_0_0_0_0
UpperCAmelCase_ : Optional[Any] = 5_0_0_0
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = os.path.split(__file__)
UpperCAmelCase_ : Tuple = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def _UpperCamelCase (_lowerCamelCase : datasets.Dataset , _lowerCamelCase : Optional[int] )-> Optional[int]:
'''simple docstring'''
for i in range(_lowerCamelCase ):
__snake_case = dataset[i]
@get_duration
def _UpperCamelCase (_lowerCamelCase : datasets.Dataset , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] )-> str:
'''simple docstring'''
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ):
__snake_case = dataset[i : i + batch_size]
@get_duration
def _UpperCamelCase (_lowerCamelCase : datasets.Dataset , _lowerCamelCase : int , _lowerCamelCase : Dict )-> str:
'''simple docstring'''
with dataset.formatted_as(type=_lowerCamelCase ):
for i in range(_lowerCamelCase ):
__snake_case = dataset[i]
@get_duration
def _UpperCamelCase (_lowerCamelCase : datasets.Dataset , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
with dataset.formatted_as(type=_lowerCamelCase ):
for i in range(0 , _lowerCamelCase , _lowerCamelCase ):
__snake_case = dataset[i : i + batch_size]
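# The batched readers above walk the dataset in contiguous slices; the index
# arithmetic alone, with a last partial batch:
_n, _bs = 10, 4
_slices = [(i, min(i + _bs, _n)) for i in range(0, _n, _bs)]
assert _slices == [(0, 4), (4, 8), (8, 10)]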
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = {'''num examples''': SPEED_TEST_N_EXAMPLES}
__snake_case = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
__snake_case = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
__snake_case = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
__snake_case = generate_example_dataset(
os.path.join(_lowerCamelCase , '''dataset.arrow''' ) , _lowerCamelCase , num_examples=_lowerCamelCase , seq_shapes={'''list''': (1_00,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(_lowerCamelCase ) )
__snake_case = func(_lowerCamelCase , **_lowerCamelCase )
print('''shuffling dataset''' )
__snake_case = dataset.shuffle()
    print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(_lowerCamelCase ) )
__snake_case = func(
_lowerCamelCase , **_lowerCamelCase )
with open(_lowerCamelCase , '''wb''' ) as f:
f.write(json.dumps(_lowerCamelCase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
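# A minimal sketch of the width arithmetic above: with embed_dim=96 and four
# stages the channel count doubles per stage, so `hidden_size` ends up at
# 96 * 2 ** 3 = 768:
_embed_dim, _num_stages = 96, 4
_widths = [_embed_dim * 2 ** i for i in range(_num_stages)]
assert _widths == [96, 192, 384, 768] and _widths[-1] == int(_embed_dim * 2 ** (_num_stages - 1))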
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] )-> Optional[Any]:
'''simple docstring'''
__snake_case = [1]
for i in range(2 , _lowerCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__snake_case = []
__snake_case = list(range(_lowerCamelCase ) )
# Find permutation
while factorials:
__snake_case = factorials.pop()
__snake_case , __snake_case = divmod(_lowerCamelCase , _lowerCamelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
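# A minimal runnable reconstruction of the factorial-number-system decode above:
# each popped factorial yields (index into the remaining elements, new k). For
# k=3, n=3 the third zero-indexed permutation of [0, 1, 2] is [1, 2, 0]:
_k, _n = 3, 3
_facts = [1]
for _i in range(2, _n):
    _facts.append(_facts[-1] * _i)
_perm, _elems = [], list(range(_n))
while _facts:
    _idx, _k = divmod(_k, _facts.pop())
    _perm.append(_elems.pop(_idx))
_perm.append(_elems[0])
assert _perm == [1, 2, 0]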
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
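# A minimal sketch of the divmod arithmetic above: 3725 seconds split into hours,
# minutes, seconds and rendered as h:mm:ss (hours omitted when zero):
_t = 3725
_h, _m, _s = _t // 3600, (_t // 60) % 60, _t % 60
assert (_h, _m, _s) == (1, 2, 5)
assert f"{_h}:{_m:02d}:{_s:02d}" == "1:02:05"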
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _UpperCamelCase (_lowerCamelCase : Union[dict, list, tuple, torch.Tensor] )-> List[Tuple[int, ...]]:
'''simple docstring'''
__snake_case = []
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(_lowerCamelCase ) )
elif isinstance(_lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_lowerCamelCase ) )
elif isinstance(_lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
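# Sanity check for the shape gathering above (toy tensors; illustration only):
assert [t.shape for t in (torch.zeros(2, 3), torch.zeros(4))] == [torch.Size([2, 3]), torch.Size([4])]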
@torch.jit.ignore
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : Tuple[int, ...] )-> Tuple[int, ...]:
'''simple docstring'''
__snake_case = []
for d in reversed(_lowerCamelCase ):
idx.append(flat_idx % d )
__snake_case = flat_idx // d
return tuple(reversed(_lowerCamelCase ) )
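# The loop above is a numpy-style unravel_index; a minimal self-contained sketch
# for an assumed example, flat index 5 in a (2, 3) grid -> (1, 2):
_flat, _idx = 5, []
for _d in reversed((2, 3)):
    _flat, _r = divmod(_flat, _d)
    _idx.append(_r)
assert tuple(reversed(_idx)) == (1, 2)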
@torch.jit.ignore
def _UpperCamelCase (_lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Optional[Sequence[bool]] = None , _lowerCamelCase : Optional[Sequence[bool]] = None , )-> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(_lowerCamelCase : List[bool] ) -> None:
__snake_case = True
for i in range(len(_lowerCamelCase ) ):
__snake_case = -1 * (i + 1)
l[reversed_idx] &= tally
__snake_case = l[reversed_idx]
if start_edges is None:
__snake_case = [s == 0 for s in start]
reduce_edge_list(_lowerCamelCase )
if end_edges is None:
__snake_case = [e == (d - 1) for e, d in zip(_lowerCamelCase , _lowerCamelCase )]
reduce_edge_list(_lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_lowerCamelCase ) == 0:
return [()]
elif len(_lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__snake_case = []
__snake_case = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_lowerCamelCase , _lowerCamelCase ):
if s == e:
path_list.append(slice(_lowerCamelCase , s + 1 ) )
else:
break
__snake_case = tuple(_lowerCamelCase )
__snake_case = len(_lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(_lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case = start[divergence_idx]
return tuple(
path + (slice(_lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case = end[divergence_idx]
return tuple(
path + (slice(_lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__snake_case = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _UpperCamelCase (_lowerCamelCase : torch.Tensor , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> torch.Tensor:
'''simple docstring'''
__snake_case = t.shape[:no_batch_dims]
__snake_case = list(_flat_idx_to_idx(_lowerCamelCase , _lowerCamelCase ) )
# _get_minimal_slice_set is inclusive
__snake_case = list(_flat_idx_to_idx(flat_end - 1 , _lowerCamelCase ) )
# Get an ordered list of slices to perform
__snake_case = _get_minimal_slice_set(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
__snake_case = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _UpperCamelCase (_lowerCamelCase : Callable , _lowerCamelCase : Dict[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False , _lowerCamelCase : Any = None , _lowerCamelCase : bool = False , )-> Any:
'''simple docstring'''
if not (len(_lowerCamelCase ) > 0):
raise ValueError('''Must provide at least one input''' )
__snake_case = [shape[:no_batch_dims] for shape in _fetch_dims(_lowerCamelCase )]
__snake_case = tuple([max(_lowerCamelCase ) for s in zip(*_lowerCamelCase )] )
def _prep_inputs(_lowerCamelCase : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__snake_case = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__snake_case = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__snake_case = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__snake_case = tensor_tree_map(_prep_inputs , _lowerCamelCase )
__snake_case = None
if _out is not None:
__snake_case = tensor_tree_map(lambda _lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__snake_case = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__snake_case = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_lowerCamelCase : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__snake_case = 0
__snake_case = prepped_outputs
for _ in range(_lowerCamelCase ):
# Chunk the input
if not low_mem:
__snake_case = _select_chunk
else:
__snake_case = partial(
_chunk_slice , flat_start=_lowerCamelCase , flat_end=min(_lowerCamelCase , i + chunk_size ) , no_batch_dims=len(_lowerCamelCase ) , )
__snake_case = tensor_tree_map(_lowerCamelCase , _lowerCamelCase )
# Run the layer on the chunk
__snake_case = layer(**_lowerCamelCase )
# Allocate space for the output
if out is None:
__snake_case = tensor_tree_map(lambda _lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(_lowerCamelCase , _lowerCamelCase ):
def assign(_lowerCamelCase : dict , _lowerCamelCase : dict ) -> None:
for k, v in da.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
assign(_lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__snake_case = da[k]
assign(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
for xa, xa in zip(_lowerCamelCase , _lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__snake_case = xa
elif isinstance(_lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__snake_case = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
__snake_case = tensor_tree_map(lambda _lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , _lowerCamelCase )
return out
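# Conceptually the chunked application above equals running the layer over the
# flattened batch slice by slice and concatenating the pieces. A toy equivalence
# check (doubling in chunks of 2; ignores the low-memory and nested-output paths):
_xs = torch.arange(6.0)
assert torch.equal(_xs * 2, torch.cat([_xs[_i : _i + 2] * 2 for _i in range(0, 6, 2)]))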
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE = 512 , ) -> List[Any]:
'''simple docstring'''
__snake_case = max_chunk_size
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__snake_case = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__snake_case = [c for c in candidates if c > min_chunk_size]
__snake_case = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__SCREAMING_SNAKE_CASE ) -> bool:
try:
with torch.no_grad():
fn(*__SCREAMING_SNAKE_CASE , chunk_size=__SCREAMING_SNAKE_CASE )
return True
except RuntimeError:
return False
__snake_case = 0
__snake_case = len(__SCREAMING_SNAKE_CASE ) - 1
while i > min_viable_chunk_size_index:
__snake_case = test_chunk_size(candidates[i] )
if not viable:
__snake_case = (min_viable_chunk_size_index + i) // 2
else:
__snake_case = i
__snake_case = (i + len(__SCREAMING_SNAKE_CASE ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
__snake_case = True
for aa, aa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert type(__SCREAMING_SNAKE_CASE ) == type(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )]
__snake_case = [v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )]
consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
consistent &= aa == aa
return consistent
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
__snake_case = True
__snake_case = tree_map(lambda __SCREAMING_SNAKE_CASE : a.shape if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) else a , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__SCREAMING_SNAKE_CASE )
__snake_case = self._compare_arg_caches(self.cached_arg_data , __SCREAMING_SNAKE_CASE )
else:
# No cached data yet, so there is nothing to reuse and we must tune
__snake_case = False
if not consistent:
__snake_case = self._determine_favorable_chunk_size(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
__snake_case = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
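# The tuner above walks a power-of-two ladder of candidate chunk sizes (with the
# largest candidate nudged by +4) and binary-searches for the biggest one that
# runs without a RuntimeError. A sketch of the ladder for max_chunk_size=512:
assert [2**_l for _l in range(10)] == [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]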
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
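# Worked example for the helper above: R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so the
# least k with 7 | R(k) is 6. Recomputing that directly:
_repunit, _k = 1 % 7, 1
while _repunit:
    _repunit, _k = (10 * _repunit + 1) % 7, _k + 1
assert _k == 6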
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
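# The arithmetic above rescales pixel values from [0, 1] (after / 255.0) into the
# [-1, 1] range the VAE expects, e.g. 0 -> -1.0 and 255 -> 1.0:
assert (2.0 * (0 / 255.0) - 1.0, 2.0 * (255 / 255.0) - 1.0) == (-1.0, 1.0)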
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any]=0.9995 )-> Union[str, Any]:
'''simple docstring'''
if not isinstance(_lowerCamelCase , np.ndarray ):
__snake_case = True
__snake_case = va.device
__snake_case = va.cpu().numpy()
__snake_case = va.cpu().numpy()
__snake_case = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) )
if np.abs(_lowerCamelCase ) > DOT_THRESHOLD:
__snake_case = (1 - t) * va + t * va
else:
__snake_case = np.arccos(_lowerCamelCase )
__snake_case = np.sin(_lowerCamelCase )
__snake_case = theta_a * t
__snake_case = np.sin(_lowerCamelCase )
__snake_case = np.sin(theta_a - theta_t ) / sin_theta_a
__snake_case = sin_theta_t / sin_theta_a
__snake_case = sa * va + sa * va
if inputs_are_torch:
__snake_case = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
return va
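# A toy check of the spherical interpolation above: for orthogonal unit vectors
# (theta = pi/2) the t=0.5 result must stay on the unit sphere:
_va, _vb = np.array([1.0, 0.0]), np.array([0.0, 1.0])
_theta = np.arccos(np.dot(_va, _vb))
_mid = (np.sin(0.5 * _theta) * _va + np.sin(_theta - 0.5 * _theta) * _vb) / np.sin(_theta)
assert np.isclose(np.linalg.norm(_mid), 1.0)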
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Dict )-> Union[str, Any]:
'''simple docstring'''
__snake_case = F.normalize(_lowerCamelCase , dim=-1 )
__snake_case = F.normalize(_lowerCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
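# For unit vectors at angle theta the distance above evaluates to theta**2 / 2,
# so orthogonal vectors give pi**2 / 8 (illustration with toy tensors):
import math
_dist = (torch.tensor([[1.0, 0.0]]) - torch.tensor([[0.0, 1.0]])).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
assert torch.allclose(_dist, torch.tensor([math.pi**2 / 8]))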
def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : List[str] )-> int:
'''simple docstring'''
for param in model.parameters():
__snake_case = value
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , clip_model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , coca_model=__SCREAMING_SNAKE_CASE , coca_tokenizer=__SCREAMING_SNAKE_CASE , coca_transform=__SCREAMING_SNAKE_CASE , )
__snake_case = (
feature_extractor.size
if isinstance(feature_extractor.size , __SCREAMING_SNAKE_CASE )
else feature_extractor.size['''shortest_edge''']
)
__snake_case = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __SCREAMING_SNAKE_CASE )
set_requires_grad(self.clip_model , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE = "auto" ) -> List[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
set_requires_grad(self.vae , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
set_requires_grad(self.vae , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
set_requires_grad(self.unet , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
set_requires_grad(self.unet , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__snake_case = min(int(num_inference_steps * strength ) , __SCREAMING_SNAKE_CASE )
__snake_case = max(num_inference_steps - init_timestep , 0 )
__snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE )}''' )
__snake_case = image.to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__SCREAMING_SNAKE_CASE )
]
__snake_case = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
else:
__snake_case = self.vae.encode(__SCREAMING_SNAKE_CASE ).latent_dist.sample(__SCREAMING_SNAKE_CASE )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case = 0.18_215 * init_latents
__snake_case = init_latents.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
__snake_case = randn_tensor(init_latents.shape , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
# get latents
__snake_case = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = init_latents
return latents
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = self.coca_transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__snake_case = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__snake_case = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__snake_case = self.feature_extractor.preprocess(__SCREAMING_SNAKE_CASE )
__snake_case = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__snake_case = self.clip_model.get_image_features(__SCREAMING_SNAKE_CASE )
__snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__SCREAMING_SNAKE_CASE )
__snake_case = image_embeddings_clip.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
__snake_case = latents.detach().requires_grad_()
__snake_case = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__snake_case = self.scheduler.alphas_cumprod[timestep]
__snake_case = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__snake_case = torch.sqrt(__SCREAMING_SNAKE_CASE )
__snake_case = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
__snake_case = self.scheduler.sigmas[index]
__snake_case = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case = 1 / 0.18_215 * sample
__snake_case = self.vae.decode(__SCREAMING_SNAKE_CASE ).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = transforms.Resize(self.feature_extractor_size )(__SCREAMING_SNAKE_CASE )
__snake_case = self.normalize(__SCREAMING_SNAKE_CASE ).to(latents.dtype )
__snake_case = self.clip_model.get_image_features(__SCREAMING_SNAKE_CASE )
__snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__SCREAMING_SNAKE_CASE )
__snake_case = spherical_dist_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).mean() * clip_guidance_scale
__snake_case = -torch.autograd.grad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0]
if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
__snake_case = latents.detach() + grads * (sigma**2)
__snake_case = noise_pred_original
else:
__snake_case = noise_pred_original - torch.sqrt(__SCREAMING_SNAKE_CASE ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 0.6 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 100 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 0.8 , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 0.1 , ) -> Any:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__SCREAMING_SNAKE_CASE )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__SCREAMING_SNAKE_CASE , torch.Generator ) and batch_size > 1:
__snake_case = [generator] + [None] * (batch_size - 1)
__snake_case = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__snake_case = [x[0] for x in coca_is_none if x[1]]
__snake_case = ''', '''.join(__SCREAMING_SNAKE_CASE )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case = self.get_image_description(__SCREAMING_SNAKE_CASE )
if style_prompt is None:
if len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case = self.get_image_description(__SCREAMING_SNAKE_CASE )
# get prompt text embeddings for content and style
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__snake_case = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__snake_case = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__snake_case = slerp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# duplicate text embeddings for each generation per prompt
__snake_case = text_embeddings.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
# set timesteps
__snake_case = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__snake_case = {}
if accepts_offset:
__snake_case = 1
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__snake_case , __snake_case = self.get_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = timesteps[:1].repeat(__SCREAMING_SNAKE_CASE )
# Preprocess image
__snake_case = preprocess(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.prepare_latents(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , text_embeddings.dtype , self.device , __SCREAMING_SNAKE_CASE )
__snake_case = preprocess(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.prepare_latents(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , text_embeddings.dtype , self.device , __SCREAMING_SNAKE_CASE )
__snake_case = slerp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if clip_guidance_scale > 0:
__snake_case = self.get_clip_image_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_clip_image_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = slerp(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = content_text_input.input_ids.shape[-1]
__snake_case = self.tokenizer([''''''] , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__snake_case = uncond_embeddings.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__snake_case = torch.randn(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device='''cpu''' , dtype=__SCREAMING_SNAKE_CASE ).to(
self.device )
else:
__snake_case = torch.randn(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case = {}
if accepts_eta:
__snake_case = eta
# check if the scheduler accepts generator
__snake_case = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__snake_case = generator
with self.progress_bar(total=__SCREAMING_SNAKE_CASE ):
for i, t in enumerate(__SCREAMING_SNAKE_CASE ):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2 )
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__snake_case = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__snake_case , __snake_case = self.cond_fn(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case = 1 / 0.18_215 * latents
__snake_case = self.vae.decode(__SCREAMING_SNAKE_CASE ).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__SCREAMING_SNAKE_CASE , nsfw_content_detected=__SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
for param in module.parameters():
__snake_case = False
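# Minimal freeze check mirroring the loop above (hypothetical module):
_frozen = torch.nn.Linear(2, 2)
for _param in _frozen.parameters():
    _param.requires_grad = False
assert not any(_param.requires_grad for _param in _frozen.parameters())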
def _UpperCamelCase ()-> List[str]:
'''simple docstring'''
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__snake_case = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
__snake_case = plt.imshow(_lowerCamelCase )
fig.axes.get_xaxis().set_visible(_lowerCamelCase )
fig.axes.get_yaxis().set_visible(_lowerCamelCase )
plt.show()
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
__snake_case = datetime.now()
__snake_case = current_time.strftime('''%H:%M:%S''' )
return timestamp
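# e.g. the "%H:%M:%S" format above renders 09:05:07 for a morning timestamp:
assert datetime(2024, 1, 2, 9, 5, 7).strftime('''%H:%M:%S''') == '''09:05:07'''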
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for the sum of an arithmetic series: S = (n / 2) * (2a + (n - 1)d)
return total
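# Worked check of the formula above: 1 + 2 + ... + 10 has first_term=1,
# common_diff=1, num_of_terms=10, so (10 / 2) * (2 * 1 + 9 * 1) == 55.0:
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == 55.0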
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase ( __lowerCAmelCase):
@staticmethod
@abstractmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
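# The parsing above accepts the usual boolean spellings via strtobool, e.g.:
assert (strtobool('''yes'''), strtobool('''no''')) == (1, 0)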
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until the process is done, so if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually ran and produced some output, in case the test relies on
    # the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
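# Usage sketch (function name assumed; the original is obfuscated): run a
# command, stream its stdout/stderr live line by line, and fail loudly on a
# non-zero exit code or on a completely silent subprocess.
#
#   result = execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())
#   assert result.stdout[0] == "ok"  # stdout/stderr are lists of decoded lines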
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
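# How the two helpers above compose, as a sanity sketch: pytest-xdist exports
# PYTEST_XDIST_WORKER as "gw0", "gw1", ...; stripping the "gw" prefix yields
# the worker index, so worker "gw3" gets port 29500 + 3 = 29503 and concurrent
# workers never race for the same torch.distributed master port.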
| 24 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = '''ylacombe/bark-small'''
__snake_case = tempfile.mkdtemp()
__snake_case = '''en_speaker_1'''
__snake_case = '''This is a test string'''
__snake_case = '''speaker_embeddings_path.json'''
__snake_case = '''speaker_embeddings'''
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.get_tokenizer()
__snake_case = BarkProcessor(tokenizer=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
__snake_case = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__snake_case = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__snake_case = 35
__snake_case = 2
__snake_case = 8
__snake_case = {
'''semantic_prompt''': np.ones(__SCREAMING_SNAKE_CASE ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__snake_case = processor(text=self.input_string , voice_preset=__SCREAMING_SNAKE_CASE )
__snake_case = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__SCREAMING_SNAKE_CASE , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__snake_case = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = processor(text=self.input_string , voice_preset=__SCREAMING_SNAKE_CASE )
__snake_case = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__SCREAMING_SNAKE_CASE , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__snake_case = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.get_tokenizer()
__snake_case = BarkProcessor(tokenizer=__SCREAMING_SNAKE_CASE )
__snake_case = processor(text=self.input_string )
__snake_case = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__snake_case = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
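# A sketch of the recurrence the table above fills in (variable names are
# obfuscated in this copy): memo[n][k] counts the partitions of n into parts
# no larger than k + 1, via memo[n][k] = memo[n][k - 1] + memo[n - k - 1][k]:
# either no part equals k + 1, or one part of size k + 1 is removed.
# Worked example: partition(4) == 5, since 4 = 3+1 = 2+2 = 2+1+1 = 1+1+1+1.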
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__snake_case = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(__SCREAMING_SNAKE_CASE ) , torch_builtin(__SCREAMING_SNAKE_CASE ) ) )
self.assertFalse(torch.allclose(gelu_python(__SCREAMING_SNAKE_CASE ) , gelu_new(__SCREAMING_SNAKE_CASE ) ) )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__snake_case = get_activation('''gelu''' )
__snake_case = get_activation('''gelu_10''' )
__snake_case = torch_builtin(__SCREAMING_SNAKE_CASE )
__snake_case = geluaa(__SCREAMING_SNAKE_CASE )
__snake_case = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__SCREAMING_SNAKE_CASE ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
get_activation('''bogus''' )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
get_activation(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = get_activation('''gelu''' )
__snake_case = 1
__snake_case = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__snake_case = acta.a
| 24 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
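# Invocation sketch, mirroring the argparse setup above (the script path is
# assumed):
#   python utils/sort_auto_mappings.py               # sort mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report unsorted files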
| 24 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCAmelCase_ : Tuple = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCAmelCase_ : Union[str, Any] = '''UperNetConfig'''
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , ) -> None:
'''simple docstring'''
super().__init__()
__snake_case = nn.Convad(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , )
__snake_case = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
__snake_case = nn.ReLU()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> torch.Tensor:
'''simple docstring'''
__snake_case = self.conv(__SCREAMING_SNAKE_CASE )
__snake_case = self.batch_norm(__SCREAMING_SNAKE_CASE )
__snake_case = self.activation(__SCREAMING_SNAKE_CASE )
return output
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
super().__init__()
__snake_case = [
nn.AdaptiveAvgPoolad(__SCREAMING_SNAKE_CASE ),
UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> torch.Tensor:
'''simple docstring'''
__snake_case = input
for layer in self.layers:
__snake_case = layer(__SCREAMING_SNAKE_CASE )
return hidden_state
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
super().__init__()
__snake_case = pool_scales
__snake_case = align_corners
__snake_case = in_channels
__snake_case = channels
__snake_case = []
for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE )
self.blocks.append(__SCREAMING_SNAKE_CASE )
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[torch.Tensor]:
'''simple docstring'''
__snake_case = []
for ppm in self.blocks:
__snake_case = ppm(__SCREAMING_SNAKE_CASE )
__snake_case = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__SCREAMING_SNAKE_CASE )
return ppm_outs
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
super().__init__()
__snake_case = config
__snake_case = config.pool_scales # e.g. (1, 2, 3, 6)
__snake_case = in_channels
__snake_case = config.hidden_size
__snake_case = False
__snake_case = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__snake_case = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__snake_case = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__snake_case = nn.ModuleList()
__snake_case = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__snake_case = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
__snake_case = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__SCREAMING_SNAKE_CASE )
self.fpn_convs.append(__SCREAMING_SNAKE_CASE )
__snake_case = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.apply(self._init_weights )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__snake_case = inputs[-1]
__snake_case = [x]
psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) )
__snake_case = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
__snake_case = self.bottleneck(__SCREAMING_SNAKE_CASE )
return output
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> torch.Tensor:
'''simple docstring'''
__snake_case = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) )
# build top-down path
__snake_case = len(__SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__snake_case = laterals[i - 1].shape[2:]
__snake_case = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
__snake_case = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__snake_case = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
__snake_case = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
__snake_case = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE )
__snake_case = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 ) -> None:
'''simple docstring'''
super().__init__()
__snake_case = config
__snake_case = config.auxiliary_in_channels
__snake_case = config.auxiliary_channels
__snake_case = config.auxiliary_num_convs
__snake_case = config.auxiliary_concat_input
__snake_case = in_index
__snake_case = (kernel_size // 2) * dilation
__snake_case = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
__snake_case = nn.Identity()
else:
__snake_case = nn.Sequential(*__SCREAMING_SNAKE_CASE )
if self.concat_input:
__snake_case = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
__snake_case = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self.apply(self._init_weights )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> torch.Tensor:
'''simple docstring'''
__snake_case = encoder_hidden_states[self.in_index]
__snake_case = self.convs(__SCREAMING_SNAKE_CASE )
if self.concat_input:
__snake_case = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__snake_case = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : int = UperNetConfig
__lowercase : Optional[int] = '''pixel_values'''
__lowercase : Tuple = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = value
UpperCAmelCase_ : Union[str, Any] = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCAmelCase_ : int = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , __lowerCAmelCase , )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__snake_case = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
__snake_case = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> Union[tuple, SemanticSegmenterOutput]:
'''simple docstring'''
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = output_attentions if output_attentions is not None else self.config.output_attentions
__snake_case = self.backbone.forward_with_filtered_kwargs(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
__snake_case = outputs.feature_maps
__snake_case = self.decode_head(__SCREAMING_SNAKE_CASE )
__snake_case = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
__snake_case = None
if self.auxiliary_head is not None:
__snake_case = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
__snake_case = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
__snake_case = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
__snake_case = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__snake_case = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
__snake_case = (logits,) + outputs[1:]
else:
__snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
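# A minimal inference sketch for the model above. The class name is assumed
# (it is obfuscated in this copy); the checkpoint is the one listed at the top
# of the file.
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   logits = outputs.logits  # (batch_size, num_labels, height, width)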
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*_lowerCamelCase : str , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=2 )-> str:
'''simple docstring'''
from .. import __version__
__snake_case = take_from
__snake_case = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__snake_case = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case = call_frame.filename
__snake_case = call_frame.lineno
__snake_case = call_frame.function
__snake_case , __snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
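# Usage sketch for the deprecation helper above (name assumed; the original is
# obfuscated). Each positional argument is an (attribute, version, message)
# tuple; when `take_from` is a kwargs dict, the deprecated value is popped and
# returned so the caller can keep honoring it.
#
#   scale = deprecate("scale", "1.0.0", "Use `cross_attention_kwargs` instead.",
#                     take_from=kwargs)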
| 24 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase_ : str = '''3'''
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Tuple = '''gpt_neox'''
def __init__( self , __SCREAMING_SNAKE_CASE=5_0432 , __SCREAMING_SNAKE_CASE=6144 , __SCREAMING_SNAKE_CASE=44 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=2_4576 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.25 , __SCREAMING_SNAKE_CASE=1_0000 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Tuple:
'''simple docstring'''
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = rotary_pct
__snake_case = rotary_emb_base
__snake_case = attention_dropout
__snake_case = hidden_dropout
__snake_case = classifier_dropout
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = use_cache
__snake_case = tie_word_embeddings
__snake_case = use_parallel_residual
__snake_case = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __SCREAMING_SNAKE_CASE ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F'''got {self.rope_scaling}''' )
__snake_case = self.rope_scaling.get('''type''' , __SCREAMING_SNAKE_CASE )
__snake_case = self.rope_scaling.get('''factor''' , __SCREAMING_SNAKE_CASE )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
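    # A `rope_scaling` value that passes the checks above, for illustration
    # (the config class name is obfuscated in this copy; GPTNeoXConfig is
    # assumed):
    #
    #   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})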
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase_ : int = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 24 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str )-> Any:
'''simple docstring'''
__snake_case = WavaVecaForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
__snake_case = downstream_dict['''projector.weight''']
__snake_case = downstream_dict['''projector.bias''']
__snake_case = downstream_dict['''model.post_net.linear.weight''']
__snake_case = downstream_dict['''model.post_net.linear.bias''']
return model
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : int )-> Any:
'''simple docstring'''
__snake_case = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
__snake_case = downstream_dict['''model.linear.weight''']
__snake_case = downstream_dict['''model.linear.bias''']
return model
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] )-> Tuple:
'''simple docstring'''
__snake_case = WavaVecaForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
__snake_case = downstream_dict['''connector.weight''']
__snake_case = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__snake_case = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__snake_case = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__snake_case = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] )-> str:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )
__snake_case = checkpoint['''Downstream''']
__snake_case = WavaVecaConfig.from_pretrained(_lowerCamelCase )
__snake_case = WavaVecaFeatureExtractor.from_pretrained(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase )
__snake_case = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__snake_case = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith('''ForAudioFrameClassification''' ):
__snake_case = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith('''ForXVector''' ):
__snake_case = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__snake_case = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
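# Invocation sketch, mirroring the argparse flags above (the script name and
# all paths are placeholders):
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.pt \
#       --model_dump_path ./converted_model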
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
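

# Usage sketch for the `_pad` override above: `global_attention_mask` stays
# aligned with `input_ids` during padding, with the new positions filled with
# -1 (local attention). This snippet is illustrative, not part of the library
# file; it assumes the public `allenai/led-base-16384` checkpoint is reachable.
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

batch = tokenizer(["short text", "a somewhat longer piece of text"])  # ragged, unpadded
# Mark the first token of each sequence for global attention (1 = global, 0 = local).
batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]

padded = tokenizer.pad(batch, padding="longest")  # routes through `_pad`
assert len(padded["global_attention_mask"][0]) == len(padded["input_ids"][0])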
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
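

# Deprecation-check sketch (illustrative): instantiating the shim emits the
# FutureWarning before `Trainer.__init__` fails for lack of a model, so the
# warning can be asserted without building a model. The import path assumes
# the module lives under `transformers.sagemaker`.
import warnings as _warnings

from transformers.sagemaker import SageMakerTrainer

with _warnings.catch_warnings(record=True) as caught:
    _warnings.simplefilter("always")
    try:
        SageMakerTrainer()  # no model: Trainer.__init__ raises after the warning fires
    except Exception:
        pass

assert any(issubclass(w.category, FutureWarning) for w in caught)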
from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph, given as an adjacency list `g`. Runs in O(V + E) time.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of a strongly connected component: pop it off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for `n` vertices from a list of (u, v) edges."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
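
    # Extra illustration: Tarjan emits components in reverse topological order
    # of the condensation, so the component DAG can be read off directly.
    components = tarjan(g)
    component_of = {v: i for i, comp in enumerate(components) for v in comp}
    condensation_edges = {
        (component_of[u], component_of[v]) for u, v in edges if component_of[u] != component_of[v]
    }
    # Every cross-component edge points from a later-emitted component to an
    # earlier-emitted one.
    assert all(u > v for u, v in condensation_edges)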
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
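

# A minimal concrete reader sketched on top of the ABC above. The name
# `JsonDatasetReader` and the delegation to `Dataset.from_json` are
# illustrative choices, not the implementation the library ships.
class JsonDatasetReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        return Dataset.from_json(
            self.path_or_paths,
            split=self.split,
            features=self.features,
            cache_dir=self.cache_dir,
            keep_in_memory=self.keep_in_memory,
        )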
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
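

# Running the suite above: the class is marked @slow, so the transformers test
# runner skips it unless RUN_SLOW is set. A typical invocation (the test file
# path is an assumption about the repository layout):
#
#   RUN_SLOW=1 pytest tests/models/barthez/test_tokenization_barthez.py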